/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright (c) 2016 NXP. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <errno.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
58 struct dpaa2_bp_info rte_dpaa2_bpid_info
[MAX_BPID
];
59 static struct dpaa2_bp_list
*h_bp_list
;
62 rte_hw_mbuf_create_pool(struct rte_mempool
*mp
)
64 struct dpaa2_bp_list
*bp_list
;
65 struct dpaa2_dpbp_dev
*avail_dpbp
;
66 struct dpbp_attr dpbp_attr
;
70 avail_dpbp
= dpaa2_alloc_dpbp_dev();
73 PMD_DRV_LOG(ERR
, "DPAA2 resources not available");
77 if (unlikely(!DPAA2_PER_LCORE_DPIO
)) {
78 ret
= dpaa2_affine_qbman_swp();
80 RTE_LOG(ERR
, PMD
, "Failure in affining portal\n");
85 ret
= dpbp_enable(&avail_dpbp
->dpbp
, CMD_PRI_LOW
, avail_dpbp
->token
);
87 PMD_INIT_LOG(ERR
, "Resource enable failure with"
88 " err code: %d\n", ret
);
92 ret
= dpbp_get_attributes(&avail_dpbp
->dpbp
, CMD_PRI_LOW
,
93 avail_dpbp
->token
, &dpbp_attr
);
95 PMD_INIT_LOG(ERR
, "Resource read failure with"
96 " err code: %d\n", ret
);
98 ret
= dpbp_disable(&avail_dpbp
->dpbp
, CMD_PRI_LOW
,
103 /* Allocate the bp_list which will be added into global_bp_list */
104 bp_list
= rte_malloc(NULL
, sizeof(struct dpaa2_bp_list
),
105 RTE_CACHE_LINE_SIZE
);
107 PMD_INIT_LOG(ERR
, "No heap memory available");
111 /* Set parameters of buffer pool list */
112 bp_list
->buf_pool
.num_bufs
= mp
->size
;
113 bp_list
->buf_pool
.size
= mp
->elt_size
114 - sizeof(struct rte_mbuf
) - rte_pktmbuf_priv_size(mp
);
115 bp_list
->buf_pool
.bpid
= dpbp_attr
.bpid
;
116 bp_list
->buf_pool
.h_bpool_mem
= NULL
;
117 bp_list
->buf_pool
.dpbp_node
= avail_dpbp
;
118 /* Identification for our offloaded pool_data structure */
119 bp_list
->dpaa2_ops_index
= mp
->ops_index
;
120 bp_list
->next
= h_bp_list
;
123 bpid
= dpbp_attr
.bpid
;
125 rte_dpaa2_bpid_info
[bpid
].meta_data_size
= sizeof(struct rte_mbuf
)
126 + rte_pktmbuf_priv_size(mp
);
127 rte_dpaa2_bpid_info
[bpid
].bp_list
= bp_list
;
128 rte_dpaa2_bpid_info
[bpid
].bpid
= bpid
;
130 mp
->pool_data
= (void *)&rte_dpaa2_bpid_info
[bpid
];
132 PMD_INIT_LOG(DEBUG
, "BP List created for bpid =%d", dpbp_attr
.bpid
);
139 rte_hw_mbuf_free_pool(struct rte_mempool
*mp
)
141 struct dpaa2_bp_info
*bpinfo
;
142 struct dpaa2_bp_list
*bp
;
143 struct dpaa2_dpbp_dev
*dpbp_node
;
145 if (!mp
->pool_data
) {
146 PMD_DRV_LOG(ERR
, "Not a valid dpaa22 pool");
150 bpinfo
= (struct dpaa2_bp_info
*)mp
->pool_data
;
151 bp
= bpinfo
->bp_list
;
152 dpbp_node
= bp
->buf_pool
.dpbp_node
;
154 dpbp_disable(&(dpbp_node
->dpbp
), CMD_PRI_LOW
, dpbp_node
->token
);
156 if (h_bp_list
== bp
) {
157 h_bp_list
= h_bp_list
->next
;
158 } else { /* if it is not the first node */
159 struct dpaa2_bp_list
*prev
= h_bp_list
, *temp
;
160 temp
= h_bp_list
->next
;
163 prev
->next
= temp
->next
;
172 dpaa2_free_dpbp_dev(dpbp_node
);
176 rte_dpaa2_mbuf_release(struct rte_mempool
*pool __rte_unused
,
177 void * const *obj_table
,
179 uint32_t meta_data_size
,
182 struct qbman_release_desc releasedesc
;
183 struct qbman_swp
*swp
;
186 uint64_t bufs
[DPAA2_MBUF_MAX_ACQ_REL
];
188 if (unlikely(!DPAA2_PER_LCORE_DPIO
)) {
189 ret
= dpaa2_affine_qbman_swp();
191 RTE_LOG(ERR
, PMD
, "Failed to allocate IO portal");
195 swp
= DPAA2_PER_LCORE_PORTAL
;
197 /* Create a release descriptor required for releasing
200 qbman_release_desc_clear(&releasedesc
);
201 qbman_release_desc_set_bpid(&releasedesc
, bpid
);
203 n
= count
% DPAA2_MBUF_MAX_ACQ_REL
;
207 /* convert mbuf to buffers for the remainder */
208 for (i
= 0; i
< n
; i
++) {
209 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
210 bufs
[i
] = (uint64_t)rte_mempool_virt2phy(pool
, obj_table
[i
])
213 bufs
[i
] = (uint64_t)obj_table
[i
] + meta_data_size
;
217 /* feed them to bman */
219 ret
= qbman_swp_release(swp
, &releasedesc
, bufs
, n
);
220 } while (ret
== -EBUSY
);
223 /* if there are more buffers to free */
225 /* convert mbuf to buffers */
226 for (i
= 0; i
< DPAA2_MBUF_MAX_ACQ_REL
; i
++) {
227 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
229 rte_mempool_virt2phy(pool
, obj_table
[n
+ i
])
232 bufs
[i
] = (uint64_t)obj_table
[n
+ i
] + meta_data_size
;
237 ret
= qbman_swp_release(swp
, &releasedesc
, bufs
,
238 DPAA2_MBUF_MAX_ACQ_REL
);
239 } while (ret
== -EBUSY
);
240 n
+= DPAA2_MBUF_MAX_ACQ_REL
;
245 rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool
*pool
,
246 void **obj_table
, unsigned int count
)
248 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
251 struct qbman_swp
*swp
;
253 uint64_t bufs
[DPAA2_MBUF_MAX_ACQ_REL
];
256 struct dpaa2_bp_info
*bp_info
;
258 bp_info
= mempool_to_bpinfo(pool
);
260 if (!(bp_info
->bp_list
)) {
261 RTE_LOG(ERR
, PMD
, "DPAA2 buffer pool not configured\n");
265 bpid
= bp_info
->bpid
;
267 if (unlikely(!DPAA2_PER_LCORE_DPIO
)) {
268 ret
= dpaa2_affine_qbman_swp();
270 RTE_LOG(ERR
, PMD
, "Failed to allocate IO portal");
274 swp
= DPAA2_PER_LCORE_PORTAL
;
277 /* Acquire is all-or-nothing, so we drain in 7s,
278 * then the remainder.
280 if ((count
- n
) > DPAA2_MBUF_MAX_ACQ_REL
) {
281 ret
= qbman_swp_acquire(swp
, bpid
, bufs
,
282 DPAA2_MBUF_MAX_ACQ_REL
);
284 ret
= qbman_swp_acquire(swp
, bpid
, bufs
,
287 /* In case of less than requested number of buffers available
288 * in pool, qbman_swp_acquire returns 0
291 PMD_TX_LOG(ERR
, "Buffer acquire failed with"
292 " err code: %d", ret
);
293 /* The API expect the exact number of requested bufs */
294 /* Releasing all buffers allocated */
295 rte_dpaa2_mbuf_release(pool
, obj_table
, bpid
,
296 bp_info
->meta_data_size
, n
);
299 /* assigning mbuf from the acquired objects */
300 for (i
= 0; (i
< ret
) && bufs
[i
]; i
++) {
301 DPAA2_MODIFY_IOVA_TO_VADDR(bufs
[i
], uint64_t);
302 obj_table
[n
] = (struct rte_mbuf
*)
303 (bufs
[i
] - bp_info
->meta_data_size
);
304 PMD_TX_LOG(DEBUG
, "Acquired %p address %p from BMAN",
305 (void *)bufs
[i
], (void *)obj_table
[n
]);
310 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
312 PMD_TX_LOG(DEBUG
, "Total = %d , req = %d done = %d",
319 rte_hw_mbuf_free_bulk(struct rte_mempool
*pool
,
320 void * const *obj_table
, unsigned int n
)
322 struct dpaa2_bp_info
*bp_info
;
324 bp_info
= mempool_to_bpinfo(pool
);
325 if (!(bp_info
->bp_list
)) {
326 RTE_LOG(ERR
, PMD
, "DPAA2 buffer pool not configured");
329 rte_dpaa2_mbuf_release(pool
, obj_table
, bp_info
->bpid
,
330 bp_info
->meta_data_size
, n
);
336 rte_hw_mbuf_get_count(const struct rte_mempool
*mp
)
339 unsigned int num_of_bufs
= 0;
340 struct dpaa2_bp_info
*bp_info
;
341 struct dpaa2_dpbp_dev
*dpbp_node
;
343 if (!mp
|| !mp
->pool_data
) {
344 RTE_LOG(ERR
, PMD
, "Invalid mempool provided");
348 bp_info
= (struct dpaa2_bp_info
*)mp
->pool_data
;
349 dpbp_node
= bp_info
->bp_list
->buf_pool
.dpbp_node
;
351 ret
= dpbp_get_num_free_bufs(&dpbp_node
->dpbp
, CMD_PRI_LOW
,
352 dpbp_node
->token
, &num_of_bufs
);
354 RTE_LOG(ERR
, PMD
, "Unable to obtain free buf count (err=%d)",
359 RTE_LOG(DEBUG
, PMD
, "Free bufs = %u", num_of_bufs
);
364 struct rte_mempool_ops dpaa2_mpool_ops
= {
366 .alloc
= rte_hw_mbuf_create_pool
,
367 .free
= rte_hw_mbuf_free_pool
,
368 .enqueue
= rte_hw_mbuf_free_bulk
,
369 .dequeue
= rte_dpaa2_mbuf_alloc_bulk
,
370 .get_count
= rte_hw_mbuf_get_count
,
373 MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops
);