]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. | |
5 | * Copyright (c) 2016 NXP. All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Freescale Semiconductor, Inc nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include <unistd.h> | |
35 | #include <stdio.h> | |
36 | #include <sys/types.h> | |
37 | #include <string.h> | |
38 | #include <stdlib.h> | |
39 | #include <fcntl.h> | |
40 | #include <errno.h> | |
41 | ||
42 | #include <rte_mbuf.h> | |
43 | #include <rte_ethdev.h> | |
44 | #include <rte_malloc.h> | |
45 | #include <rte_memcpy.h> | |
46 | #include <rte_string_fns.h> | |
47 | #include <rte_cycles.h> | |
48 | #include <rte_kvargs.h> | |
49 | #include <rte_dev.h> | |
50 | #include <rte_ethdev.h> | |
51 | ||
52 | #include <fslmc_logs.h> | |
53 | #include <mc/fsl_dpbp.h> | |
54 | #include <portal/dpaa2_hw_pvt.h> | |
55 | #include <portal/dpaa2_hw_dpio.h> | |
56 | #include "dpaa2_hw_mempool.h" | |
57 | ||
/* Per-bpid lookup table: maps a hardware buffer-pool id (bpid) handed out
 * by the MC firmware to its geometry and owning bp_list.  Filled in by
 * rte_hw_mbuf_create_pool() and published to fast-path code through
 * mp->pool_data.
 */
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
/* Head of the file-local singly linked list of all buffer pools created
 * by this driver.
 */
static struct dpaa2_bp_list *h_bp_list;
60 | ||
61 | static int | |
62 | rte_hw_mbuf_create_pool(struct rte_mempool *mp) | |
63 | { | |
64 | struct dpaa2_bp_list *bp_list; | |
65 | struct dpaa2_dpbp_dev *avail_dpbp; | |
66 | struct dpbp_attr dpbp_attr; | |
67 | uint32_t bpid; | |
68 | int ret, p_ret; | |
69 | ||
70 | avail_dpbp = dpaa2_alloc_dpbp_dev(); | |
71 | ||
72 | if (!avail_dpbp) { | |
73 | PMD_DRV_LOG(ERR, "DPAA2 resources not available"); | |
74 | return -ENOENT; | |
75 | } | |
76 | ||
77 | if (unlikely(!DPAA2_PER_LCORE_DPIO)) { | |
78 | ret = dpaa2_affine_qbman_swp(); | |
79 | if (ret) { | |
80 | RTE_LOG(ERR, PMD, "Failure in affining portal\n"); | |
81 | return ret; | |
82 | } | |
83 | } | |
84 | ||
85 | ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token); | |
86 | if (ret != 0) { | |
87 | PMD_INIT_LOG(ERR, "Resource enable failure with" | |
88 | " err code: %d\n", ret); | |
89 | return ret; | |
90 | } | |
91 | ||
92 | ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW, | |
93 | avail_dpbp->token, &dpbp_attr); | |
94 | if (ret != 0) { | |
95 | PMD_INIT_LOG(ERR, "Resource read failure with" | |
96 | " err code: %d\n", ret); | |
97 | p_ret = ret; | |
98 | ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, | |
99 | avail_dpbp->token); | |
100 | return p_ret; | |
101 | } | |
102 | ||
103 | /* Allocate the bp_list which will be added into global_bp_list */ | |
104 | bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list), | |
105 | RTE_CACHE_LINE_SIZE); | |
106 | if (!bp_list) { | |
107 | PMD_INIT_LOG(ERR, "No heap memory available"); | |
108 | return -ENOMEM; | |
109 | } | |
110 | ||
111 | /* Set parameters of buffer pool list */ | |
112 | bp_list->buf_pool.num_bufs = mp->size; | |
113 | bp_list->buf_pool.size = mp->elt_size | |
114 | - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp); | |
115 | bp_list->buf_pool.bpid = dpbp_attr.bpid; | |
116 | bp_list->buf_pool.h_bpool_mem = NULL; | |
117 | bp_list->buf_pool.dpbp_node = avail_dpbp; | |
118 | /* Identification for our offloaded pool_data structure */ | |
119 | bp_list->dpaa2_ops_index = mp->ops_index; | |
120 | bp_list->next = h_bp_list; | |
121 | bp_list->mp = mp; | |
122 | ||
123 | bpid = dpbp_attr.bpid; | |
124 | ||
125 | rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf) | |
126 | + rte_pktmbuf_priv_size(mp); | |
127 | rte_dpaa2_bpid_info[bpid].bp_list = bp_list; | |
128 | rte_dpaa2_bpid_info[bpid].bpid = bpid; | |
129 | ||
130 | mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid]; | |
131 | ||
132 | PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid); | |
133 | ||
134 | h_bp_list = bp_list; | |
135 | return 0; | |
136 | } | |
137 | ||
138 | static void | |
139 | rte_hw_mbuf_free_pool(struct rte_mempool *mp) | |
140 | { | |
141 | struct dpaa2_bp_info *bpinfo; | |
142 | struct dpaa2_bp_list *bp; | |
143 | struct dpaa2_dpbp_dev *dpbp_node; | |
144 | ||
145 | if (!mp->pool_data) { | |
146 | PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool"); | |
147 | return; | |
148 | } | |
149 | ||
150 | bpinfo = (struct dpaa2_bp_info *)mp->pool_data; | |
151 | bp = bpinfo->bp_list; | |
152 | dpbp_node = bp->buf_pool.dpbp_node; | |
153 | ||
154 | dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token); | |
155 | ||
156 | if (h_bp_list == bp) { | |
157 | h_bp_list = h_bp_list->next; | |
158 | } else { /* if it is not the first node */ | |
159 | struct dpaa2_bp_list *prev = h_bp_list, *temp; | |
160 | temp = h_bp_list->next; | |
161 | while (temp) { | |
162 | if (temp == bp) { | |
163 | prev->next = temp->next; | |
164 | free(bp); | |
165 | break; | |
166 | } | |
167 | prev = temp; | |
168 | temp = temp->next; | |
169 | } | |
170 | } | |
171 | ||
172 | dpaa2_free_dpbp_dev(dpbp_node); | |
173 | } | |
174 | ||
/* Release @count buffers from @obj_table back into hardware pool @bpid.
 *
 * @pool           owning mempool; only dereferenced on the
 *                 RTE_LIBRTE_DPAA2_USE_PHYS_IOVA path (virt->phys
 *                 translation), hence marked __rte_unused.
 * @meta_data_size bytes of mbuf header + private area in front of the HW
 *                 buffer; added to each object address so QBMAN receives
 *                 the start of the data buffer, not the mbuf.
 * @count          number of entries in @obj_table.
 *
 * QBMAN releases are performed in batches of DPAA2_MBUF_MAX_ACQ_REL:
 * the remainder (count % DPAA2_MBUF_MAX_ACQ_REL) is pushed first, then
 * full batches. qbman_swp_release() is retried while it reports -EBUSY.
 */
static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
		       void * const *obj_table,
		       uint32_t bpid,
		       uint32_t meta_data_size,
		       int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	/* Ensure this lcore has an affined QBMAN software portal before
	 * touching the hardware.  On failure the buffers are simply not
	 * released (best effort, void return).
	 */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	/* Push the non-multiple-of-batch remainder first so the loop
	 * below only ever deals with full batches.
	 */
	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbuf to buffers for the remainder */
	for (i = 0; i < n ; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	do {
		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
	} while (ret == -EBUSY);

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbuf to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				  rte_mempool_virt2phy(pool, obj_table[n + i])
				  + meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		do {
			ret = qbman_swp_release(swp, &releasedesc, bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} while (ret == -EBUSY);
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}
243 | ||
244 | int | |
245 | rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, | |
246 | void **obj_table, unsigned int count) | |
247 | { | |
248 | #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER | |
249 | static int alloc; | |
250 | #endif | |
251 | struct qbman_swp *swp; | |
252 | uint16_t bpid; | |
253 | uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL]; | |
254 | int i, ret; | |
255 | unsigned int n = 0; | |
256 | struct dpaa2_bp_info *bp_info; | |
257 | ||
258 | bp_info = mempool_to_bpinfo(pool); | |
259 | ||
260 | if (!(bp_info->bp_list)) { | |
261 | RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n"); | |
262 | return -ENOENT; | |
263 | } | |
264 | ||
265 | bpid = bp_info->bpid; | |
266 | ||
267 | if (unlikely(!DPAA2_PER_LCORE_DPIO)) { | |
268 | ret = dpaa2_affine_qbman_swp(); | |
269 | if (ret != 0) { | |
270 | RTE_LOG(ERR, PMD, "Failed to allocate IO portal"); | |
271 | return ret; | |
272 | } | |
273 | } | |
274 | swp = DPAA2_PER_LCORE_PORTAL; | |
275 | ||
276 | while (n < count) { | |
277 | /* Acquire is all-or-nothing, so we drain in 7s, | |
278 | * then the remainder. | |
279 | */ | |
280 | if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) { | |
281 | ret = qbman_swp_acquire(swp, bpid, bufs, | |
282 | DPAA2_MBUF_MAX_ACQ_REL); | |
283 | } else { | |
284 | ret = qbman_swp_acquire(swp, bpid, bufs, | |
285 | count - n); | |
286 | } | |
287 | /* In case of less than requested number of buffers available | |
288 | * in pool, qbman_swp_acquire returns 0 | |
289 | */ | |
290 | if (ret <= 0) { | |
291 | PMD_TX_LOG(ERR, "Buffer acquire failed with" | |
292 | " err code: %d", ret); | |
293 | /* The API expect the exact number of requested bufs */ | |
294 | /* Releasing all buffers allocated */ | |
295 | rte_dpaa2_mbuf_release(pool, obj_table, bpid, | |
296 | bp_info->meta_data_size, n); | |
297 | return ret; | |
298 | } | |
299 | /* assigning mbuf from the acquired objects */ | |
300 | for (i = 0; (i < ret) && bufs[i]; i++) { | |
301 | DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t); | |
302 | obj_table[n] = (struct rte_mbuf *) | |
303 | (bufs[i] - bp_info->meta_data_size); | |
304 | PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN", | |
305 | (void *)bufs[i], (void *)obj_table[n]); | |
306 | n++; | |
307 | } | |
308 | } | |
309 | ||
310 | #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER | |
311 | alloc += n; | |
312 | PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d", | |
313 | alloc, count, n); | |
314 | #endif | |
315 | return 0; | |
316 | } | |
317 | ||
318 | static int | |
319 | rte_hw_mbuf_free_bulk(struct rte_mempool *pool, | |
320 | void * const *obj_table, unsigned int n) | |
321 | { | |
322 | struct dpaa2_bp_info *bp_info; | |
323 | ||
324 | bp_info = mempool_to_bpinfo(pool); | |
325 | if (!(bp_info->bp_list)) { | |
326 | RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured"); | |
327 | return -ENOENT; | |
328 | } | |
329 | rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid, | |
330 | bp_info->meta_data_size, n); | |
331 | ||
332 | return 0; | |
333 | } | |
334 | ||
335 | static unsigned int | |
336 | rte_hw_mbuf_get_count(const struct rte_mempool *mp) | |
337 | { | |
338 | int ret; | |
339 | unsigned int num_of_bufs = 0; | |
340 | struct dpaa2_bp_info *bp_info; | |
341 | struct dpaa2_dpbp_dev *dpbp_node; | |
342 | ||
343 | if (!mp || !mp->pool_data) { | |
344 | RTE_LOG(ERR, PMD, "Invalid mempool provided"); | |
345 | return 0; | |
346 | } | |
347 | ||
348 | bp_info = (struct dpaa2_bp_info *)mp->pool_data; | |
349 | dpbp_node = bp_info->bp_list->buf_pool.dpbp_node; | |
350 | ||
351 | ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW, | |
352 | dpbp_node->token, &num_of_bufs); | |
353 | if (ret) { | |
354 | RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)", | |
355 | ret); | |
356 | return 0; | |
357 | } | |
358 | ||
359 | RTE_LOG(DEBUG, PMD, "Free bufs = %u", num_of_bufs); | |
360 | ||
361 | return num_of_bufs; | |
362 | } | |
363 | ||
/* rte_mempool handler table registered under the "dpaa2" ops name:
 * enqueue/dequeue go to the QBMAN hardware buffer pool instead of a
 * software ring.
 */
struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = "dpaa2",
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
};

MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);