/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is
 * identified by its name, and uses a ring to store free objects. It
 * provides some other optional services, like a per-core object
 * cache, and an alignment helper to ensure that objects are padded
 * to spread them equally on all RAM channels, ranks, and so on.
 *
 * Objects owned by a mempool should never be added to another
 * mempool. When an object is freed using rte_mempool_put() or
 * equivalent, the object data is not modified; the user can save some
 * meta-data in the object data and retrieve it when allocating a
 * new object.
 *
 * Note: the mempool implementation is not preemptible. An lcore must not be
 * interrupted by another task that uses the same mempool (because it uses a
 * ring which is not preemptible). Also, usual mempool functions like
 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
 * thread due to the internal per-lcore cache. Due to the lack of caching,
 * rte_mempool_get() or rte_mempool_put() performance will suffer when called
 * by non-EAL threads. Instead, non-EAL threads should call
 * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
 * created with rte_mempool_cache_create().
 */

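/*
 * Illustrative sketch (not part of the API): how a non-EAL thread might use a
 * user-owned cache with the generic get/put functions mentioned above. The
 * pool pointer "mp" and the burst size are hypothetical.
 *
 *   struct rte_mempool_cache *cache;
 *   void *objs[32];
 *
 *   cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *   if (cache == NULL)
 *           return;                          // allocation failed
 *   if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *           // ... use the 32 objects ...
 *           rte_mempool_generic_put(mp, objs, 32, cache);
 *   }
 *   rte_mempool_cache_flush(cache, mp);      // return cached objects to the pool
 *   rte_mempool_cache_free(cache);
 */
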
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
        uint64_t put_bulk;         /**< Number of puts. */
        uint64_t put_objs;         /**< Number of objects successfully put. */
        uint64_t get_success_bulk; /**< Successful allocation number. */
        uint64_t get_success_objs; /**< Objects successfully allocated. */
        uint64_t get_fail_bulk;    /**< Failed allocation number. */
        uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
        /** Successful allocation number of contiguous blocks. */
        uint64_t get_success_blks;
        /** Failed allocation number of contiguous blocks. */
        uint64_t get_fail_blks;
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
        uint32_t size;        /**< Size of the cache */
        uint32_t flushthresh; /**< Threshold before we flush excess elements */
        uint32_t len;         /**< Current cache count */
        /*
         * Cache is allocated to this size to allow it to overflow in certain
         * cases to avoid needless emptying of cache.
         */
        void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
        uint32_t elt_size;     /**< Size of an element. */
        uint32_t header_size;  /**< Size of header (before elt). */
        uint32_t trailer_size; /**< Size of trailer (after elt). */
        uint32_t total_size;
        /**< Total size of an object (header + elt + trailer). */
};

/**< Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
                              sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure
 *
 * Each object stored in mempools is prefixed by this header structure,
 * which allows retrieving the mempool pointer from the object and
 * iterating over all objects attached to a mempool. When debug is enabled,
 * a cookie is also added in this structure to help detect corruptions and
 * double-frees.
 */
struct rte_mempool_objhdr {
        STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
        struct rte_mempool *mp;  /**< The mempool owning the object. */
        RTE_STD_C11
        union {
                rte_iova_t iova;      /**< IO address of the object. */
                phys_addr_t physaddr; /**< deprecated - Physical address of the object. */
        };
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        uint64_t cookie;         /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer structure containing a cookie that helps detect memory corruptions.
 */
struct rte_mempool_objtlr {
        uint64_t cookie; /**< Debug cookie. */
};

#endif

/**
 * A list of memory where objects are stored
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
        void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
        STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
        struct rte_mempool *mp;  /**< The mempool owning the chunk */
        void *addr;              /**< Virtual address of the chunk */
        RTE_STD_C11
        union {
                rte_iova_t iova;       /**< IO address of the chunk */
                phys_addr_t phys_addr; /**< Physical address of the chunk */
        };
        size_t len;              /**< length of the chunk */
        rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
        void *opaque;            /**< Argument passed to the free callback */
};

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Additional information about the mempool
 *
 * The structure is cache-line aligned to avoid ABI breakages in
 * a number of cases when something small is added.
 */
struct rte_mempool_info {
        /** Number of objects in the contiguous block */
        unsigned int contig_block_size;
} __rte_cache_aligned;

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
        /*
         * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
         * compatibility requirements; it could be changed to
         * RTE_MEMPOOL_NAMESIZE next time the ABI changes.
         */
        char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
        RTE_STD_C11
        union {
                void *pool_data;  /**< Ring or pool to store objects. */
                uint64_t pool_id; /**< External mempool identifier. */
        };
        void *pool_config;            /**< optional args for ops alloc. */
        const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
        unsigned int flags;           /**< Flags of the mempool. */
        int socket_id;                /**< Socket id passed at create. */
        uint32_t size;                /**< Max size of the mempool. */
        uint32_t cache_size;
        /**< Size of per-lcore default local cache. */

        uint32_t elt_size;            /**< Size of an element. */
        uint32_t header_size;         /**< Size of header (before elt). */
        uint32_t trailer_size;        /**< Size of trailer (after elt). */

        unsigned private_data_size;   /**< Size of private data. */
        /**
         * Index into rte_mempool_ops_table array of mempool ops
         * structs, which contain callback function pointers.
         * We're using an index here rather than pointers to the callbacks
         * to facilitate any secondary processes that may want to use
         * this mempool.
         */
        int32_t ops_index;

        struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

        uint32_t populated_size;      /**< Number of populated objects. */
        struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
        uint32_t nb_mem_chunks;       /**< Number of memory chunks */
        struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        /** Per-lcore statistics. */
        struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;

#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */

/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
                unsigned __lcore_id = rte_lcore_id();           \
                if (__lcore_id < RTE_MAX_LCORE) {               \
                        mp->stats[__lcore_id].name##_objs += n; \
                        mp->stats[__lcore_id].name##_bulk += 1; \
                }                                               \
        } while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {      \
                unsigned int __lcore_id = rte_lcore_id();       \
                if (__lcore_id < RTE_MAX_LCORE) {               \
                        mp->stats[__lcore_id].name##_blks += n; \
                        mp->stats[__lcore_id].name##_bulk += 1; \
                }                                               \
        } while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
#endif

/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
        (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
        (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
        return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
                sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
        struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
        return hdr->mp;
}

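/*
 * Illustrative sketch (not part of the API): rte_mempool_from_obj() lets code
 * that only holds an object pointer return it to whichever pool owns it. The
 * helper name release_obj() is hypothetical.
 *
 *   static void release_obj(void *obj)
 *   {
 *           struct rte_mempool *mp = rte_mempool_from_obj(obj);
 *
 *           rte_mempool_put(mp, obj);   // give the object back to its owner
 *   }
 */
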
/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
        struct rte_mempool *mp = rte_mempool_from_obj(obj);
        return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/**
 * @internal Check and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Index of object in object table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
        void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
        rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * @internal Check contiguous object blocks and update cookies or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param first_obj_table_const
 *   Pointer to a table of void * pointers (first object of the contiguous
 *   object blocks).
 * @param n
 *   Number of contiguous object blocks.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
        void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
                                              free) \
        rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
                                                free)
#else
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
                                              free) \
        do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 * E.g. the default ops provides an instance of the rte_ring for this purpose.
 * Other ops will most likely point to a different type of data structure,
 * which is transparent to the application programmer.
 * This function should set mp->pool_data.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
                void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
                void **obj_table, unsigned int n);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
                void **first_obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/**
 * Calculate memory size required to store given number of objects.
 *
 * If mempool objects are not required to be IOVA-contiguous
 * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
 * virtually contiguous chunk size. Otherwise, if mempool objects must
 * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
 * min_chunk_size defines IOVA-contiguous chunk size.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
 *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk which may be used to
 *   store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size aligned at page boundary.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
                uint32_t obj_num, uint32_t pg_shift,
                size_t *min_chunk_size, size_t *align);

/**
 * Default way to calculate memory size required to store given number of
 * objects.
 *
 * If page boundaries may be ignored, it is simply the total object size
 * (including header and trailer) multiplied by the number of objects.
 * Otherwise, it is the number of pages required to store the given number
 * of objects without crossing page boundaries.
 *
 * Note that if object size is bigger than page size, then it assumes
 * that pages are grouped in subsets of physically contiguous pages big
 * enough to store at least one object.
 *
 * Minimum size of memory chunk is the maximum of the page size and total
 * element size.
 *
 * Required memory chunk alignment is the maximum of page size and cache
 * line size.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                uint32_t obj_num, uint32_t pg_shift,
                size_t *min_chunk_size, size_t *align);

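/*
 * Illustrative arithmetic for the default calculation above (the numbers are
 * made up): with a total per-object size of 2.5 KiB and 4 KiB pages
 * (pg_shift = 12), objects may not cross page boundaries, so only one object
 * fits per page and 1000 objects need 1000 pages = 4000 KiB. With
 * pg_shift = 0 (page boundaries ignored), the same 1000 objects need only
 * 1000 * 2.5 KiB = 2500 KiB of virtually contiguous memory.
 */
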
/**
 * Function to be called for each populated object.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] opaque
 *   An opaque pointer passed to iterator.
 * @param[in] vaddr
 *   Object virtual address.
 * @param[in] iova
 *   IO address of the object, or RTE_BAD_IOVA.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
                void *opaque, void *vaddr, rte_iova_t iova);

/**
 * Populate memory pool objects using provided memory chunk.
 *
 * Populated objects should be enqueued to the pool, e.g. using
 * rte_mempool_ops_enqueue_bulk().
 *
 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
 * the chunk doesn't need to be physically contiguous (only virtually),
 * and allocated objects may span two pages.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] max_objs
 *   Maximum number of objects to be populated.
 * @param[in] vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address
 * @param[in] len
 *   The length of memory in bytes.
 * @param[in] obj_cb
 *   Callback function to be executed for each populated object.
 * @param[in] obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   The number of objects added on success.
 *   On error, no objects are populated and a negative errno is returned.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
                unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate memory pool object using provided memory
 * chunk: just slice objects one by one.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
                unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Get some additional information about a mempool.
 */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
                struct rte_mempool_info *info);


/** Structure defining mempool operations structure */
struct rte_mempool_ops {
        char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
        rte_mempool_alloc_t alloc;       /**< Allocate private data. */
        rte_mempool_free_t free;         /**< Free the external pool. */
        rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
        rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
        rte_mempool_get_count get_count; /**< Get qty of available objs. */
        /**
         * Optional callback to calculate memory size required to
         * store specified number of objects.
         */
        rte_mempool_calc_mem_size_t calc_mem_size;
        /**
         * Optional callback to populate mempool objects using
         * provided memory chunk.
         */
        rte_mempool_populate_t populate;
        /**
         * Get mempool info
         */
        rte_mempool_get_info_t get_info;
        /**
         * Dequeue a number of contiguous object blocks.
         */
        rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 * Each process has its own storage for this ops struct array so that
 * the mempools can be shared across primary and secondary processes.
 * The indices used to access the array are valid across processes, whereas
 * any function pointers stored directly in the mempool struct would not be.
 * This results in us simply having "ops_index" in the mempool struct.
 */
struct rte_mempool_ops_table {
        rte_spinlock_t sl; /**< Spinlock for add/delete. */
        uint32_t num_ops;  /**< Number of used ops structs in the table. */
        /**
         * Storage for all possible ops structs.
         */
        struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/**
 * @internal Get the mempool ops struct from its index.
 *
 * @param ops_index
 *   The index of the ops struct in the ops struct table. It must be a valid
 *   index: (0 <= idx < num_ops).
 * @return
 *   The pointer to the ops struct in the table.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
        RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

        return &rte_mempool_ops_table.ops[ops_index];
}

/**
 * @internal Wrapper for mempool_ops alloc callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   - 0: Success; successfully allocated mempool pool_data.
 *   - <0: Error; code of alloc function.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops dequeue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to get.
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
                void **obj_table, unsigned n)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);
        return ops->dequeue(mp, obj_table, n);
}

/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[out] first_obj_table
 *   Pointer to a table of void * pointers (first objects).
 * @param[in] n
 *   Number of blocks to get.
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
                void **first_obj_table, unsigned int n)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);
        RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
        return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

/**
 * @internal wrapper for mempool_ops enqueue callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects to put.
 * @return
 *   - 0: Success; n objects supplied.
 *   - <0: Error; code of enqueue function.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
                unsigned n)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);
        return ops->enqueue(mp, obj_table, n);
}

/**
 * @internal wrapper for mempool_ops get_count callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @return
 *   The number of available objects in the external pool.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal wrapper for mempool_ops calc_mem_size callback.
 * API to calculate size of memory required to store specified number of
 * objects.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[in] obj_num
 *   Number of objects.
 * @param[in] pg_shift
 *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk which may be used to
 *   store memory pool objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size aligned at page boundary.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
                uint32_t obj_num, uint32_t pg_shift,
                size_t *min_chunk_size, size_t *align);

/**
 * @internal wrapper for mempool_ops populate callback.
 *
 * Populate memory pool objects using provided memory chunk.
 *
 * @param[in] mp
 *   A pointer to the mempool structure.
 * @param[in] max_objs
 *   Maximum number of objects to be populated.
 * @param[in] vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param[in] iova
 *   The IO address
 * @param[in] len
 *   The length of memory in bytes.
 * @param[in] obj_cb
 *   Callback function to be executed for each populated object.
 * @param[in] obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   The number of objects added on success.
 *   On error, no objects are populated and a negative errno is returned.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb,
                void *obj_cb_arg);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Wrapper for mempool_ops get_info callback.
 *
 * @param[in] mp
 *   Pointer to the memory pool.
 * @param[out] info
 *   Pointer to the rte_mempool_info structure
 * @return
 *   - 0: Success; the mempool driver supports retrieving supplementary
 *     mempool information.
 *   - -ENOTSUP - doesn't support get_info ops (valid case).
 */
__rte_experimental
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
                struct rte_mempool_info *info);

/**
 * @internal wrapper for mempool_ops free callback.
 *
 * @param mp
 *   Pointer to the memory pool.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just after
 * a call to rte_mempool_create_empty().
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the ops structure to use for this mempool.
 * @param pool_config
 *   Opaque data that can be passed by the application to the ops functions.
 * @return
 *   - 0: Success; the mempool is now using the requested ops functions.
 *   - -EINVAL - Invalid ops struct name provided.
 *   - -EEXIST - mempool already has an ops struct assigned.
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
                void *pool_config);

/**
 * Register mempool operations.
 *
 * @param ops
 *   Pointer to an ops structure to register.
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - -EINVAL - some missing callbacks while registering ops struct.
 *   - -ENOSPC - the maximum number of ops structs has been reached.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
 */
#define MEMPOOL_REGISTER_OPS(ops)                       \
        RTE_INIT(mp_hdlr_init_##ops)                    \
        {                                               \
                rte_mempool_register_ops(&ops);         \
        }

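/*
 * Illustrative sketch (not a real driver): registering a custom mempool
 * handler. The callback implementations (my_alloc, my_free, ...) and the
 * handler name "my_handler" are hypothetical; only the struct fields and
 * MEMPOOL_REGISTER_OPS() come from this header.
 *
 *   static const struct rte_mempool_ops my_ops = {
 *           .name = "my_handler",
 *           .alloc = my_alloc,          // must set mp->pool_data
 *           .free = my_free,
 *           .enqueue = my_enqueue,
 *           .dequeue = my_dequeue,
 *           .get_count = my_get_count,
 *   };
 *   MEMPOOL_REGISTER_OPS(my_ops);
 *
 * An application would then select it with
 * rte_mempool_set_ops_byname(mp, "my_handler", NULL) on an empty mempool.
 */
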
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
                void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
                void *opaque, struct rte_mempool_memhdr *memhdr,
                unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``rte_memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The number of elements in the mempool. The optimum size (in terms of
 *   memory usage) for a mempool is when n is a power of two minus one:
 *   n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   If cache_size is non-zero, the rte_mempool library will try to
 *   limit the accesses to the common lockless pool, by maintaining a
 *   per-lcore object cache. This argument must be lower than or equal to
 *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
 *   cache_size to have "n modulo cache_size == 0": if this is
 *   not the case, some elements will always stay in the pool and will
 *   never be used. The access to the per-lcore table is of course
 *   faster than the multi-producer/consumer pool. The cache can be
 *   disabled if the cache_size argument is set to 0; it can be useful to
 *   avoid losing objects in cache.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param mp_init
 *   A function pointer that is called for initialization of the pool,
 *   before object initialization. The user can initialize the private
 *   data in this function if needed. This parameter can be NULL if
 *   not needed.
 * @param mp_init_arg
 *   An opaque pointer to data that can be used in the mempool
 *   constructor function.
 * @param obj_init
 *   A function pointer that is called for each object at
 *   initialization of the pool. The user can set some meta data in
 *   objects if needed. This parameter can be NULL if not needed.
 *   The obj_init() function takes the mempool pointer, the init_arg,
 *   the object pointer and the object number as parameters.
 * @param obj_init_arg
 *   An opaque pointer to data that can be used as an argument for
 *   each call to the object constructor function.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* argument is an OR of the following flags:
 *   - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
 *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
 *     MEMPOOL_F_NO_SPREAD.
 *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
 *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
 *   - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
 *     necessarily be contiguous in IO memory.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
                   unsigned cache_size, unsigned private_data_size,
                   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
                   int socket_id, unsigned flags);

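/*
 * Illustrative sketch: creating a pool of 8191 (2^13 - 1) fixed-size objects
 * with a 256-entry per-lcore cache. The pool name and element size are made
 * up; error handling is reduced to a NULL check.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *                           NULL, NULL,    // no pool constructor
 *                           NULL, NULL,    // no per-object constructor
 *                           SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *           rte_panic("cannot create mempool: %s\n",
 *                     rte_strerror(rte_errno));
 */
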
/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 *
 * @param name
 *   The name of the mempool.
 * @param n
 *   The maximum number of elements that can be added in the mempool.
 *   The optimum size (in terms of memory usage) for a mempool is when n
 *   is a power of two minus one: n = (2^q - 1).
 * @param elt_size
 *   The size of each element.
 * @param cache_size
 *   Size of the cache. See rte_mempool_create() for details.
 * @param private_data_size
 *   The size of the private data appended after the mempool
 *   structure. This is useful for storing some private data after the
 *   mempool structure, as is done for rte_mbuf_pool for example.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   Flags controlling the behavior of the mempool. See
 *   rte_mempool_create() for details.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. See rte_mempool_create() for details.
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
        unsigned cache_size, unsigned private_data_size,
        int socket_id, unsigned flags);
/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and all
 * memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void
rte_mempool_free(struct rte_mempool *mp);

/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * Add a virtually and physically contiguous memory chunk in the pool
 * where objects can be instantiated.
 *
 * If the given IO address is unknown (iova = RTE_BAD_IOVA),
 * the chunk doesn't need to be physically contiguous (only virtually),
 * and allocated objects may span two pages.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param iova
 *   The IO address
 * @param len
 *   The length of memory in bytes.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param addr
 *   The virtual address of memory that should be used to store objects.
 *   Must be page-aligned.
 * @param len
 *   The length of memory in bytes. Must be page-aligned.
 * @param pg_sz
 *   The size of memory pages in this virtual area.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

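/*
 * Illustrative sketch: building a pool in separate steps instead of using
 * rte_mempool_create(). "ring_mp_mc" is the stock DPDK ring handler name;
 * the object initializer my_obj_init() is hypothetical.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create_empty("example_pool", 8191, 2048, 256, 0,
 *                                 SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *           return -1;
 *   if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *       rte_mempool_populate_default(mp) < 0) {
 *           rte_mempool_free(mp);
 *           return -1;
 *   }
 *   rte_mempool_obj_iter(mp, my_obj_init, NULL);  // optional per-object init
 */
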
/**
 * Add memory from anonymous mapping for objects in the pool at init
 *
 * This function mmaps an anonymous memory zone that is locked in
 * memory to store the objects of the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on it.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
        rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Call a function for each mempool memory chunk
 *
 * Iterate across all memory chunks attached to a rte_mempool and call
 * the callback function on it.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param mem_cb
 *   A function pointer that is called for each memory chunk.
 * @param mem_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
        rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

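/*
 * Illustrative sketch: a rte_mempool_mem_cb_t callback that prints each
 * memory chunk backing a pool. The function name dump_chunk() is hypothetical.
 *
 *   static void dump_chunk(struct rte_mempool *mp, void *opaque,
 *                          struct rte_mempool_memhdr *memhdr,
 *                          unsigned mem_idx)
 *   {
 *           (void)opaque;  // unused
 *           printf("%s chunk %u: addr=%p len=%zu\n",
 *                  mp->name, mem_idx, memhdr->addr, memhdr->len);
 *   }
 *
 *   // rte_mempool_mem_iter(mp, dump_chunk, NULL);
 */
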
/**
 * Dump the status of the mempool to a file.
 *
 * @param f
 *   A pointer to a file for output
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * Create a user-owned mempool cache.
 *
 * This can be used by non-EAL threads to enable caching when they
 * interact with a mempool.
 *
 * @param size
 *   The size of the mempool cache. See rte_mempool_create()'s cache_size
 *   parameter description for more information. The same limits and
 *   considerations apply here too.
 * @param socket_id
 *   The socket identifier in the case of NUMA. The value can be
 *   SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 *
 * @param cache
 *   A pointer to the mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param lcore_id
 *   The logical core id.
 * @return
 *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
        if (mp->cache_size == 0)
                return NULL;

        if (lcore_id >= RTE_MAX_LCORE)
                return NULL;

        return &mp->local_cache[lcore_id];
}

/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
                        struct rte_mempool *mp)
{
        if (cache == NULL)
                cache = rte_mempool_default_cache(mp, rte_lcore_id());
        if (cache == NULL || cache->len == 0)
                return;
        rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
        cache->len = 0;
}

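/*
 * Illustrative note: a thread that owns a rte_mempool_cache should flush it
 * before freeing it; otherwise any objects still held in the cache would not
 * be returned to the pool. A minimal teardown sketch:
 *
 *   rte_mempool_cache_flush(cache, mp);
 *   rte_mempool_cache_free(cache);
 */
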
/**
 * @internal Put several objects back in the mempool; used internally.
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to store back in the mempool, must be strictly
 *   positive.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
                      unsigned int n, struct rte_mempool_cache *cache)
{
        void **cache_objs;

        /* Increment stat now; adding to the mempool always succeeds. */
        __MEMPOOL_STAT_ADD(mp, put, n);

        /* No cache provided, or the put would overflow the memory allocated
         * for the cache.
         */
        if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
                goto ring_enqueue;

        cache_objs = &cache->objs[cache->len];

        /*
         * The cache follows the following algorithm:
         *   1. Add the objects to the cache.
         *   2. Anything greater than the cache min value (if it crosses the
         *      cache flush threshold) is flushed to the ring.
         */

        /* Add elements back into the cache */
        rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

        cache->len += n;

        if (cache->len >= cache->flushthresh) {
                rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
                                cache->len - cache->size);
                cache->len = cache->size;
        }

        return;

ring_enqueue:

        /* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
                rte_panic("cannot put objects in mempool\n");
#else
        rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}


/**
 * Put several objects back in the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from the obj_table.
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
                        unsigned int n, struct rte_mempool_cache *cache)
{
        __mempool_check_cookies(mp, obj_table, n, 0);
        __mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the mempool from obj_table.
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                     unsigned int n)
{
        struct rte_mempool_cache *cache;
        cache = rte_mempool_default_cache(mp, rte_lcore_id());
        rte_mempool_generic_put(mp, obj_table, n, cache);
}

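/*
 * Illustrative sketch: returning a burst of objects with one bulk call
 * instead of looping over rte_mempool_put(). The array and count are
 * hypothetical; bulk operations amortize the cost of touching the cache
 * and the underlying ring.
 *
 *   void *burst[32];
 *   unsigned int nb = 32;             // objects previously taken from mp
 *
 *   rte_mempool_put_bulk(mp, burst, nb);
 */
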
7c673cae FG |
1310 | /** |
1311 | * Put one object back in the mempool. | |
1312 | * | |
1313 | * This function calls the multi-producer or the single-producer | |
1314 | * version depending on the default behavior that was specified at | |
1315 | * mempool creation time (see flags). | |
1316 | * | |
1317 | * @param mp | |
1318 | * A pointer to the mempool structure. | |
1319 | * @param obj | |
1320 | * A pointer to the object to be added. | |
1321 | */ | |
9f95a23c | 1322 | static __rte_always_inline void |
7c673cae FG |
1323 | rte_mempool_put(struct rte_mempool *mp, void *obj) |
1324 | { | |
1325 | rte_mempool_put_bulk(mp, &obj, 1); | |
1326 | } | |
1327 | ||
1328 | /** | |
1329 | * @internal Get several objects from the mempool; used internally. | |
1330 | * @param mp | |
1331 | * A pointer to the mempool structure. | |
1332 | * @param obj_table | |
1333 | * A pointer to a table of void * pointers (objects). | |
1334 | * @param n | |
1335 | * The number of objects to get, must be strictly positive. | |
1336 | * @param cache | |
1337 | * A pointer to a mempool cache structure. May be NULL if not needed. | |
7c673cae FG |
1338 | * @return |
1339 | * - >=0: Success; number of objects supplied. | |
1340 | * - <0: Error; code of ring dequeue function. | |
1341 | */ | |
9f95a23c | 1342 | static __rte_always_inline int |
7c673cae | 1343 | __mempool_generic_get(struct rte_mempool *mp, void **obj_table, |
9f95a23c | 1344 | unsigned int n, struct rte_mempool_cache *cache) |
7c673cae FG |
1345 | { |
1346 | int ret; | |
1347 | uint32_t index, len; | |
1348 | void **cache_objs; | |
1349 | ||
11fdf7f2 TL |
1350 | /* No cache provided or cannot be satisfied from cache */ |
1351 | if (unlikely(cache == NULL || n >= cache->size)) | |
7c673cae FG |
1352 | goto ring_dequeue; |
1353 | ||
1354 | cache_objs = cache->objs; | |
1355 | ||
1356 | /* Can this be satisfied from the cache? */ | |
1357 | if (cache->len < n) { | |
1358 | /* No. Backfill the cache first, and then fill from it */ | |
1359 | uint32_t req = n + (cache->size - cache->len); | |
1360 | ||
1361 | /* How many do we require, i.e. the number needed to refill the cache plus the request */ | |
1362 | ret = rte_mempool_ops_dequeue_bulk(mp, | |
1363 | &cache->objs[cache->len], req); | |
1364 | if (unlikely(ret < 0)) { | |
1365 | /* | |
9f95a23c | 1366 | * In the off chance that we are buffer constrained, |
7c673cae FG |
1367 | * where we are not able to allocate cache + n, go to |
1368 | * the ring directly. If that fails, we are truly out of | |
1369 | * buffers. | |
1370 | */ | |
1371 | goto ring_dequeue; | |
1372 | } | |
1373 | ||
1374 | cache->len += req; | |
1375 | } | |
1376 | ||
1377 | /* Now fill in the response ... */ | |
1378 | for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++) | |
1379 | *obj_table = cache_objs[len]; | |
1380 | ||
1381 | cache->len -= n; | |
1382 | ||
1383 | __MEMPOOL_STAT_ADD(mp, get_success, n); | |
1384 | ||
1385 | return 0; | |
1386 | ||
1387 | ring_dequeue: | |
1388 | ||
1389 | /* get remaining objects from ring */ | |
1390 | ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n); | |
1391 | ||
1392 | if (ret < 0) | |
1393 | __MEMPOOL_STAT_ADD(mp, get_fail, n); | |
1394 | else | |
1395 | __MEMPOOL_STAT_ADD(mp, get_success, n); | |
1396 | ||
1397 | return ret; | |
1398 | } | |
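/*
 * A worked example of the cached path above, with illustrative numbers:
 * for cache->size = 32, cache->len = 10 and a request of n = 16, the
 * backfill dequeues req = 16 + (32 - 10) = 38 objects from the ring,
 * raising cache->len to 48; the 16 requested objects are then copied out
 * from the top of the cache, leaving cache->len = 32 (a full cache).
 * Requests with n >= cache->size bypass the cache entirely and are served
 * directly by the ring dequeue.
 */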
1399 | ||
1400 | /** | |
1401 | * Get several objects from the mempool. | |
1402 | * | |
1403 | * If cache is enabled, objects will be retrieved first from cache, | |
1404 | * subsequently from the common pool. Note that it can return -ENOENT when | |
1405 | * the local cache and common pool are empty, even if caches from other | |
1406 | * lcores are full. | |
1407 | * | |
1408 | * @param mp | |
1409 | * A pointer to the mempool structure. | |
1410 | * @param obj_table | |
1411 | * A pointer to a table of void * pointers (objects) that will be filled. | |
1412 | * @param n | |
1413 | * The number of objects to get from mempool to obj_table. | |
1414 | * @param cache | |
1415 | * A pointer to a mempool cache structure. May be NULL if not needed. | |
7c673cae FG |
1416 | * @return |
1417 | * - 0: Success; objects taken. | |
1418 | * - -ENOENT: Not enough entries in the mempool; no object is retrieved. | |
1419 | */ | |
9f95a23c TL |
1420 | static __rte_always_inline int |
1421 | rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, | |
1422 | unsigned int n, struct rte_mempool_cache *cache) | |
7c673cae FG |
1423 | { |
1424 | int ret; | |
11fdf7f2 | 1425 | ret = __mempool_generic_get(mp, obj_table, n, cache); |
7c673cae FG |
1426 | if (ret == 0) |
1427 | __mempool_check_cookies(mp, obj_table, n, 1); | |
1428 | return ret; | |
1429 | } | |
1430 | ||
7c673cae FG |
1431 | /** |
1432 | * Get several objects from the mempool. | |
1433 | * | |
1434 | * This function calls the multi-consumer or the single-consumer | |
1435 | * version, depending on the default behavior that was specified at | |
1436 | * mempool creation time (see flags). | |
1437 | * | |
1438 | * If cache is enabled, objects will be retrieved first from cache, | |
1439 | * subsequently from the common pool. Note that it can return -ENOENT when | |
1440 | * the local cache and common pool are empty, even if caches from other | |
1441 | * lcores are full. | |
1442 | * | |
1443 | * @param mp | |
1444 | * A pointer to the mempool structure. | |
1445 | * @param obj_table | |
1446 | * A pointer to a table of void * pointers (objects) that will be filled. | |
1447 | * @param n | |
1448 | * The number of objects to get from the mempool to obj_table. | |
1449 | * @return | |
1450 | * - 0: Success; objects taken | |
1451 | * - -ENOENT: Not enough entries in the mempool; no object is retrieved. | |
1452 | */ | |
9f95a23c TL |
1453 | static __rte_always_inline int |
1454 | rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) | |
7c673cae FG |
1455 | { |
1456 | struct rte_mempool_cache *cache; | |
1457 | cache = rte_mempool_default_cache(mp, rte_lcore_id()); | |
9f95a23c | 1458 | return rte_mempool_generic_get(mp, obj_table, n, cache); |
7c673cae FG |
1459 | } |
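/*
 * A minimal usage sketch: rte_mempool_get_bulk() is all-or-nothing, so on
 * -ENOENT nothing has to be returned to the pool. BURST is a hypothetical
 * application constant.
 *
 *   #define BURST 32
 *
 *   static int
 *   grab_burst(struct rte_mempool *mp, void *objs[BURST])
 *   {
 *           if (rte_mempool_get_bulk(mp, objs, BURST) < 0)
 *                   return -1;  // pool and local cache exhausted
 *           // ... use the objects, then rte_mempool_put_bulk() them back
 *           return 0;
 *   }
 */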
1460 | ||
7c673cae FG |
1461 | /** |
1462 | * Get one object from the mempool. | |
1463 | * | |
1464 | * This function calls the multi-consumer or the single-consumer | |
1465 | * version, depending on the default behavior that was specified at | |
1466 | * mempool creation (see flags). | |
1467 | * | |
1468 | * If cache is enabled, objects will be retrieved first from cache, | |
1469 | * subsequently from the common pool. Note that it can return -ENOENT when | |
1470 | * the local cache and common pool are empty, even if caches from other | |
1471 | * lcores are full. | |
1472 | * | |
1473 | * @param mp | |
1474 | * A pointer to the mempool structure. | |
1475 | * @param obj_p | |
1476 | * A pointer to a void * pointer (object) that will be filled. | |
1477 | * @return | |
1478 | * - 0: Success; objects taken. | |
1479 | * - -ENOENT: Not enough entries in the mempool; no object is retrieved. | |
1480 | */ | |
9f95a23c | 1481 | static __rte_always_inline int |
7c673cae FG |
1482 | rte_mempool_get(struct rte_mempool *mp, void **obj_p) |
1483 | { | |
1484 | return rte_mempool_get_bulk(mp, obj_p, 1); | |
1485 | } | |
1486 | ||
9f95a23c TL |
1487 | /** |
1488 | * @warning | |
1489 | * @b EXPERIMENTAL: this API may change without prior notice. | |
1490 | * | |
1491 | * Get contiguous blocks of objects from the mempool. | |
1492 | * | |
1493 | * If cache is enabled, consider flushing it first, so that objects are | |
1494 | * reused as soon as possible. | |
1495 | * | |
1496 | * The application should check that the driver supports the operation | |
1497 | * by calling rte_mempool_ops_get_info() and checking that `contig_block_size` | |
1498 | * is not zero. | |
1499 | * | |
1500 | * @param mp | |
1501 | * A pointer to the mempool structure. | |
1502 | * @param first_obj_table | |
1503 | * A pointer to a pointer to the first object in each block. | |
1504 | * @param n | |
1505 | * The number of blocks to get from mempool. | |
1506 | * @return | |
1507 | * - 0: Success; blocks taken. | |
1508 | * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved. | |
1509 | * - -EOPNOTSUPP: The mempool driver does not support block dequeue | |
1510 | */ | |
1511 | static __rte_always_inline int | |
1512 | __rte_experimental | |
1513 | rte_mempool_get_contig_blocks(struct rte_mempool *mp, | |
1514 | void **first_obj_table, unsigned int n) | |
1515 | { | |
1516 | int ret; | |
1517 | ||
1518 | ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); | |
1519 | if (ret == 0) { | |
1520 | __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n); | |
1521 | __mempool_contig_blocks_check_cookies(mp, first_obj_table, n, | |
1522 | 1); | |
1523 | } else { | |
1524 | __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n); | |
1525 | } | |
1526 | ||
1527 | return ret; | |
1528 | } | |
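/*
 * A minimal usage sketch, following the capability check suggested above:
 * rte_mempool_ops_get_info() is consulted first, and blocks are requested
 * only when contig_block_size is non-zero.
 *
 *   static int
 *   grab_blocks(struct rte_mempool *mp, void **first_objs, unsigned int n)
 *   {
 *           struct rte_mempool_info info;
 *
 *           if (rte_mempool_ops_get_info(mp, &info) < 0 ||
 *               info.contig_block_size == 0)
 *                   return -EOPNOTSUPP;  // driver cannot dequeue blocks
 *           return rte_mempool_get_contig_blocks(mp, first_objs, n);
 *   }
 */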
1529 | ||
7c673cae FG |
1530 | /** |
1531 | * Return the number of entries in the mempool. | |
1532 | * | |
1533 | * When cache is enabled, this function has to browse the cache of | |
1534 | * every lcore, so it should not be used in the data path, but only for | |
1535 | * debug purposes. User-owned mempool caches are not accounted for. | |
1536 | * | |
1537 | * @param mp | |
1538 | * A pointer to the mempool structure. | |
1539 | * @return | |
1540 | * The number of entries in the mempool. | |
1541 | */ | |
1542 | unsigned int rte_mempool_avail_count(const struct rte_mempool *mp); | |
1543 | ||
7c673cae FG |
1544 | /** |
1545 | * Return the number of elements which have been allocated from the mempool. | |
1546 | * | |
1547 | * When cache is enabled, this function has to browse the cache of | |
1548 | * every lcore, so it should not be used in the data path, but only for | |
1549 | * debug purposes. | |
1550 | * | |
1551 | * @param mp | |
1552 | * A pointer to the mempool structure. | |
1553 | * @return | |
1554 | * The number of elements which have been allocated from the mempool. | |
1555 | */ | |
1556 | unsigned int | |
1557 | rte_mempool_in_use_count(const struct rte_mempool *mp); | |
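/*
 * A debug-time sketch combining the two counters above: away from the data
 * path, and ignoring objects parked in user-owned caches or concurrent
 * activity on other lcores, available plus in-use entries add up to the
 * pool size.
 *
 *   static void
 *   check_accounting(const struct rte_mempool *mp)
 *   {
 *           unsigned int avail = rte_mempool_avail_count(mp);
 *           unsigned int used = rte_mempool_in_use_count(mp);
 *
 *           RTE_ASSERT(avail + used == mp->size);
 *   }
 */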
1558 | ||
7c673cae FG |
1559 | /** |
1560 | * Test if the mempool is full. | |
1561 | * | |
1562 | * When cache is enabled, this function has to browse the cache of all | |
1563 | * lcores, so it should not be used in the data path, but only for debug | |
1564 | * purposes. User-owned mempool caches are not accounted for. | |
1565 | * | |
1566 | * @param mp | |
1567 | * A pointer to the mempool structure. | |
1568 | * @return | |
1569 | * - 1: The mempool is full. | |
1570 | * - 0: The mempool is not full. | |
1571 | */ | |
1572 | static inline int | |
1573 | rte_mempool_full(const struct rte_mempool *mp) | |
1574 | { | |
1575 | return !!(rte_mempool_avail_count(mp) == mp->size); | |
1576 | } | |
1577 | ||
1578 | /** | |
1579 | * Test if the mempool is empty. | |
1580 | * | |
1581 | * When cache is enabled, this function has to browse the cache of all | |
1582 | * lcores, so it should not be used in the data path, but only for debug | |
1583 | * purposes. User-owned mempool caches are not accounted for. | |
1584 | * | |
1585 | * @param mp | |
1586 | * A pointer to the mempool structure. | |
1587 | * @return | |
1588 | * - 1: The mempool is empty. | |
1589 | * - 0: The mempool is not empty. | |
1590 | */ | |
1591 | static inline int | |
1592 | rte_mempool_empty(const struct rte_mempool *mp) | |
1593 | { | |
1594 | return !!(rte_mempool_avail_count(mp) == 0); | |
1595 | } | |
1596 | ||
1597 | /** | |
9f95a23c | 1598 | * Return the IO address of elt, which is an element of the pool mp. |
7c673cae | 1599 | * |
7c673cae FG |
1600 | * @param elt |
1601 | * A pointer (virtual address) to the element of the pool. | |
1602 | * @return | |
9f95a23c TL |
1603 | * The IO address of the elt element. |
1604 | * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the | |
1605 | * returned value is RTE_BAD_IOVA. | |
7c673cae | 1606 | */ |
9f95a23c TL |
1607 | static inline rte_iova_t |
1608 | rte_mempool_virt2iova(const void *elt) | |
7c673cae FG |
1609 | { |
1610 | const struct rte_mempool_objhdr *hdr; | |
1611 | hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt, | |
1612 | sizeof(*hdr)); | |
9f95a23c | 1613 | return hdr->iova; |
7c673cae FG |
1614 | } |
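/*
 * A minimal usage sketch: translating a freshly allocated element to the
 * bus address a device would use, e.g. when filling a (hypothetical) DMA
 * descriptor.
 *
 *   static int
 *   fill_desc(struct rte_mempool *mp, rte_iova_t *desc_addr)
 *   {
 *           void *elt;
 *
 *           if (rte_mempool_get(mp, &elt) < 0)
 *                   return -ENOENT;
 *           // RTE_BAD_IOVA is returned for pools created with
 *           // MEMPOOL_F_NO_IOVA_CONTIG.
 *           *desc_addr = rte_mempool_virt2iova(elt);
 *           return 0;
 *   }
 */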
1615 | ||
1616 | /** | |
1617 | * Check the consistency of mempool objects. | |
1618 | * | |
1619 | * Verify the coherency of fields in the mempool structure. Also check | |
1620 | * that the cookies of mempool objects (even the ones that are not | |
1621 | * present in pool) have a correct value. If not, a panic will occur. | |
1622 | * | |
1623 | * @param mp | |
1624 | * A pointer to the mempool structure. | |
1625 | */ | |
1626 | void rte_mempool_audit(struct rte_mempool *mp); | |
1627 | ||
1628 | /** | |
1629 | * Return a pointer to the private data in a mempool structure. | |
1630 | * | |
1631 | * @param mp | |
1632 | * A pointer to the mempool structure. | |
1633 | * @return | |
1634 | * A pointer to the private data. | |
1635 | */ | |
1636 | static inline void *rte_mempool_get_priv(struct rte_mempool *mp) | |
1637 | { | |
1638 | return (char *)mp + | |
1639 | MEMPOOL_HEADER_SIZE(mp, mp->cache_size); | |
1640 | } | |
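/*
 * A minimal sketch of how the private area is typically used: space is
 * reserved through the private_data_size argument of rte_mempool_create()
 * and retrieved later with rte_mempool_get_priv(). struct app_priv and the
 * creation parameters are hypothetical.
 *
 *   struct app_priv { uint32_t elt_payload; };
 *
 *   static struct rte_mempool *
 *   create_with_priv(void)
 *   {
 *           struct rte_mempool *mp;
 *
 *           mp = rte_mempool_create("app_pool", 4096, 2048, 256,
 *                                   sizeof(struct app_priv),
 *                                   NULL, NULL, NULL, NULL,
 *                                   SOCKET_ID_ANY, 0);
 *           if (mp != NULL)
 *                   ((struct app_priv *)rte_mempool_get_priv(mp))->elt_payload = 2048;
 *           return mp;
 *   }
 */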
1641 | ||
1642 | /** | |
1643 | * Dump the status of all mempools to the console. | |
1644 | * | |
1645 | * @param f | |
1646 | * A pointer to a file for output | |
1647 | */ | |
1648 | void rte_mempool_list_dump(FILE *f); | |
1649 | ||
1650 | /** | |
1651 | * Search for a mempool by its name. | |
1652 | * | |
1653 | * @param name | |
1654 | * The name of the mempool. | |
1655 | * @return | |
1656 | * The pointer to the mempool matching the name, or NULL if not found, | |
1657 | * in which case rte_errno is set appropriately. | |
1658 | * Possible rte_errno values include: | |
1659 | * - ENOENT - required entry not available to return. | |
1660 | * | |
1661 | */ | |
1662 | struct rte_mempool *rte_mempool_lookup(const char *name); | |
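/*
 * A minimal usage sketch, e.g. attaching to a pool created elsewhere in the
 * same DPDK application. The pool name "app_pool" is hypothetical; rte_errno
 * comes from <rte_errno.h>.
 *
 *   static struct rte_mempool *
 *   attach_pool(void)
 *   {
 *           struct rte_mempool *mp = rte_mempool_lookup("app_pool");
 *
 *           if (mp == NULL && rte_errno == ENOENT)
 *                   printf("mempool \"app_pool\" does not exist yet\n");
 *           return mp;
 *   }
 */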
1663 | ||
1664 | /** | |
1665 | * Get the header, trailer and total size of a mempool element. | |
1666 | * | |
1667 | * Given a desired size of the mempool element and mempool flags, | |
1668 | * calculates header, trailer, body and total sizes of the mempool object. | |
1669 | * | |
1670 | * @param elt_size | |
1671 | * The size of each element, without header and trailer. | |
1672 | * @param flags | |
1673 | * The flags used for the mempool creation. | |
1674 | * Consult rte_mempool_create() for more information about possible values. | |
1676 | * @param sz | |
1677 | * The calculated detailed size of the mempool object. May be NULL. | |
1678 | * @return | |
1679 | * Total size of the mempool object. | |
1680 | */ | |
1681 | uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, | |
1682 | struct rte_mempool_objsz *sz); | |
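/*
 * A minimal usage sketch: computing the per-object footprint of a pool of
 * 2048-byte elements created with no special flags, before the pool itself
 * exists.
 *
 *   static void
 *   print_obj_footprint(void)
 *   {
 *           struct rte_mempool_objsz sz;
 *           uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *
 *           printf("header %u + element %u + trailer %u = %u bytes/object\n",
 *                  sz.header_size, sz.elt_size, sz.trailer_size, total);
 *   }
 */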
1683 | ||
7c673cae FG |
1684 | /** |
1685 | * Walk the list of all memory pools. | |
1686 | * | |
1687 | * @param func | |
1688 | * Iterator function | |
1689 | * @param arg | |
1690 | * Argument passed to iterator | |
1691 | */ | |
1692 | void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), | |
1693 | void *arg); | |
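/*
 * A minimal usage sketch: the callback below prints one line per registered
 * mempool; the second argument is an opaque user pointer (unused here).
 *
 *   static void
 *   dump_one(struct rte_mempool *mp, void *arg __rte_unused)
 *   {
 *           printf("%s: %u available out of %u\n",
 *                  mp->name, rte_mempool_avail_count(mp), mp->size);
 *   }
 *
 *   static void
 *   dump_all(void)
 *   {
 *           rte_mempool_walk(dump_one, NULL);
 *   }
 */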
1694 | ||
1695 | #ifdef __cplusplus | |
1696 | } | |
1697 | #endif | |
1698 | ||
1699 | #endif /* _RTE_MEMPOOL_H_ */ |