/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include "rte_mempool.h"

TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);

static struct rte_tailq_elem rte_mempool_tailq = {
        .name = "RTE_MEMPOOL",
};
EAL_REGISTER_TAILQ(rte_mempool_tailq)

#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c)       \
        ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))

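/*
 * Illustrative note (not from the original source): with the 1.5
 * multiplier above, a per-lcore cache created with size 256 gets a
 * flush threshold of 384, i.e. the cache is only flushed back to the
 * common pool once it grows past 384 objects.
 */
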
/*
 * return the greatest common divisor between a and b (fast algorithm)
 */
static unsigned get_gcd(unsigned a, unsigned b)
{
        /* Euclid's algorithm */
        while (b != 0) {
                unsigned c = a % b;

                a = b;
                b = c;
        }
        return a;
}

/*
 * Depending on memory configuration, object addresses are spread
 * between channels and ranks in RAM: the pool allocator will add
 * padding between objects. This function returns the new size of the
 * object.
 */
static unsigned optimize_object_size(unsigned obj_size)
{
        unsigned nrank, nchan;
        unsigned new_obj_size;

        /* get number of channels */
        nchan = rte_memory_get_nchannel();
        if (nchan == 0)
                nchan = 4;

        /* get number of ranks */
        nrank = rte_memory_get_nrank();
        if (nrank == 0)
                nrank = 1;

        /* process new object size */
        new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN;
        while (get_gcd(new_obj_size, nrank * nchan) != 1)
                new_obj_size++;
        return new_obj_size * RTE_MEMPOOL_ALIGN;
}

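/*
 * Worked example (illustrative, assuming RTE_MEMPOOL_ALIGN is a 64-byte
 * cache line, 4 memory channels and 2 ranks): a 2048-byte object is
 * 32 cache lines; gcd(32, 4 * 2) = 8, so the size is bumped to
 * 33 cache lines (gcd(33, 8) = 1) and optimize_object_size() returns
 * 33 * 64 = 2112 bytes, i.e. one extra cache line of padding per object.
 */
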
static int
find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
{
        size_t *min = arg;

        if (msl->page_sz < *min)
                *min = msl->page_sz;

        return 0;
}

static size_t
get_min_page_size(void)
{
        size_t min_pagesz = SIZE_MAX;

        rte_memseg_list_walk(find_min_pagesz, &min_pagesz);

        return min_pagesz == SIZE_MAX ? (size_t) getpagesize() : min_pagesz;
}

static void
mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
                 void *obj, rte_iova_t iova)
{
        struct rte_mempool_objhdr *hdr;
        struct rte_mempool_objtlr *tlr __rte_unused;

        /* set mempool ptr in header */
        hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
        hdr->mp = mp;
        hdr->iova = iova;
        STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
        mp->populated_size++;

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
        tlr = __mempool_get_trailer(obj);
        tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
}

/* call obj_cb() for each mempool element */
uint32_t
rte_mempool_obj_iter(struct rte_mempool *mp,
        rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        struct rte_mempool_objhdr *hdr;
        void *obj;
        unsigned n = 0;

        STAILQ_FOREACH(hdr, &mp->elt_list, next) {
                obj = (char *)hdr + sizeof(*hdr);
                obj_cb(mp, obj_cb_arg, obj, n);
                n++;
        }

        return n;
}

/* call mem_cb() for each mempool memory chunk */
uint32_t
rte_mempool_mem_iter(struct rte_mempool *mp,
        rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
{
        struct rte_mempool_memhdr *hdr;
        unsigned n = 0;

        STAILQ_FOREACH(hdr, &mp->mem_list, next) {
                mem_cb(mp, mem_cb_arg, hdr, n);
                n++;
        }

        return n;
}

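/*
 * Usage sketch (illustrative only, not part of this file): counting the
 * elements of a populated mempool with rte_mempool_obj_iter(). The
 * callback "count_cb" and its accumulator are hypothetical names.
 *
 *        static void
 *        count_cb(struct rte_mempool *mp, void *arg, void *obj, unsigned idx)
 *        {
 *                unsigned *counter = arg;
 *
 *                (*counter)++;
 *        }
 *
 *        unsigned counter = 0;
 *        uint32_t n = rte_mempool_obj_iter(mp, count_cb, &counter);
 *        // here n == counter == mp->populated_size
 */
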
/* get the header, trailer and total size of a mempool element. */
uint32_t
rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
        struct rte_mempool_objsz *sz)
{
        struct rte_mempool_objsz lsz;

        sz = (sz != NULL) ? sz : &lsz;

        sz->header_size = sizeof(struct rte_mempool_objhdr);
        if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
                sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
                        RTE_MEMPOOL_ALIGN);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        sz->trailer_size = sizeof(struct rte_mempool_objtlr);
#else
        sz->trailer_size = 0;
#endif

        /* element size is 8-byte aligned at least */
        sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));

        /* expand trailer to next cache line */
        if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
                sz->total_size = sz->header_size + sz->elt_size +
                        sz->trailer_size;
                sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
                                  (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
                                 RTE_MEMPOOL_ALIGN_MASK);
        }

        /*
         * increase trailer to add padding between objects in order to
         * spread them across memory channels/ranks
         */
        if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
                unsigned new_size;

                new_size = optimize_object_size(sz->header_size + sz->elt_size +
                        sz->trailer_size);
                sz->trailer_size = new_size - sz->header_size - sz->elt_size;
        }

        /* this is the size of an object, including header and trailer */
        sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;

        return sz->total_size;
}

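/*
 * Usage sketch (illustrative): querying the per-object layout for a
 * 2048-byte element with default flags. The exact values depend on
 * RTE_LIBRTE_MEMPOOL_DEBUG and on the channel/rank spreading above.
 *
 *        struct rte_mempool_objsz sz;
 *        uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *        // total == sz.header_size + sz.elt_size + sz.trailer_size
 */
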
/* free a memchunk allocated with rte_memzone_reserve() */
static void
rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
        void *opaque)
{
        const struct rte_memzone *mz = opaque;

        rte_memzone_free(mz);
}

/* Free memory chunks used by a mempool. Objects must be in pool */
static void
rte_mempool_free_memchunks(struct rte_mempool *mp)
{
        struct rte_mempool_memhdr *memhdr;
        void *elt;

        while (!STAILQ_EMPTY(&mp->elt_list)) {
                rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
                (void)elt;
                STAILQ_REMOVE_HEAD(&mp->elt_list, next);
                mp->populated_size--;
        }

        while (!STAILQ_EMPTY(&mp->mem_list)) {
                memhdr = STAILQ_FIRST(&mp->mem_list);
                STAILQ_REMOVE_HEAD(&mp->mem_list, next);
                if (memhdr->free_cb != NULL)
                        memhdr->free_cb(memhdr, memhdr->opaque);
                rte_free(memhdr);
                mp->nb_mem_chunks--;
        }
}

static int
mempool_ops_alloc_once(struct rte_mempool *mp)
{
        int ret;

        /* create the internal ring if not already done */
        if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
                ret = rte_mempool_ops_alloc(mp);
                if (ret != 0)
                        return ret;
                mp->flags |= MEMPOOL_F_POOL_CREATED;
        }
        return 0;
}

/* Add objects in the pool, using a physically contiguous memory
 * zone. Return the number of objects added, or a negative value
 * on error.
 */
int
rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque)
{
        unsigned i = 0;
        size_t off;
        struct rte_mempool_memhdr *memhdr;
        int ret;

        ret = mempool_ops_alloc_once(mp);
        if (ret != 0)
                return ret;

        /* mempool is already populated */
        if (mp->populated_size >= mp->size)
                return -ENOSPC;

        memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
        if (memhdr == NULL)
                return -ENOMEM;

        memhdr->mp = mp;
        memhdr->addr = vaddr;
        memhdr->iova = iova;
        memhdr->len = len;
        memhdr->free_cb = free_cb;
        memhdr->opaque = opaque;

        if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
                off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
        else
                off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;

        if (off > len) {
                ret = -EINVAL;
                goto fail;
        }

        i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
                (char *)vaddr + off,
                (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
                len - off, mempool_add_elem, NULL);

        /* not enough room to store one object */
        if (i == 0) {
                ret = -EINVAL;
                goto fail;
        }

        STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
        mp->nb_mem_chunks++;
        return i;

fail:
        rte_free(memhdr);
        return ret;
}

/* Populate the mempool with a virtual area. Return the number of
 * objects added, or a negative value on error.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque)
{
        rte_iova_t iova;
        size_t off, phys_len;
        int ret, cnt = 0;

        /* address and len must be page-aligned */
        if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
                return -EINVAL;
        if (RTE_ALIGN_CEIL(len, pg_sz) != len)
                return -EINVAL;

        if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
                return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
                        len, free_cb, opaque);

        for (off = 0; off + pg_sz <= len &&
                     mp->populated_size < mp->size; off += phys_len) {

                iova = rte_mem_virt2iova(addr + off);

                if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* populate with the largest group of contiguous pages */
                for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
                        rte_iova_t iova_tmp;

                        iova_tmp = rte_mem_virt2iova(addr + off + phys_len);

                        if (iova_tmp != iova + phys_len)
                                break;
                }

                ret = rte_mempool_populate_iova(mp, addr + off, iova,
                        phys_len, free_cb, opaque);
                if (ret < 0)
                        goto fail;
                /* no need to call the free callback for next chunks */
                free_cb = NULL;
                cnt += ret;
        }

        return cnt;

fail:
        rte_mempool_free_memchunks(mp);
        return ret;
}

/* Default function to populate the mempool: allocate memory in memzones,
 * and populate them. Return the number of objects added, or a negative
 * value on error.
 */
int
rte_mempool_populate_default(struct rte_mempool *mp)
{
        unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        ssize_t mem_size;
        size_t align, pg_sz, pg_shift;
        rte_iova_t iova;
        unsigned mz_id, n;
        int ret;
        bool no_contig, try_contig, no_pageshift;

        ret = mempool_ops_alloc_once(mp);
        if (ret != 0)
                return ret;

        /* mempool must not be populated */
        if (mp->nb_mem_chunks != 0)
                return -EEXIST;

        no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG;

        /*
         * the following section calculates page shift and page size values.
         *
         * these values impact the result of calc_mem_size operation, which
         * returns the amount of memory that should be allocated to store the
         * desired number of objects. when not zero, it allocates more memory
         * for the padding between objects, to ensure that an object does not
         * cross a page boundary. in other words, page size/shift are to be set
         * to zero if mempool elements won't care about page boundaries.
         * there are several considerations for page size and page shift here.
         *
         * if we don't need our mempools to have physically contiguous objects,
         * then just set page shift and page size to 0, because the user has
         * indicated that there's no need to care about anything.
         *
         * if we do need contiguous objects, there is also an option to reserve
         * the entire mempool memory as one contiguous block of memory, in
         * which case the page shift and alignment wouldn't matter as well.
         *
         * if we require contiguous objects, but not necessarily the entire
         * mempool reserved space to be contiguous, then there are two options.
         *
         * if our IO addresses are virtual, not actual physical (IOVA as VA
         * case), then no page shift needed - our memory allocation will give us
         * contiguous IO memory as far as the hardware is concerned, so
         * act as if we're getting contiguous memory.
         *
         * if our IO addresses are physical, we may get memory from bigger
         * pages, or we might get memory from smaller pages, and how much of it
         * we require depends on whether we want bigger or smaller pages.
         * However, requesting each and every memory size is too much work, so
         * what we'll do instead is walk through the page sizes available, pick
         * the smallest one and set up page shift to match that one. We will be
         * wasting some space this way, but it's much nicer than looping around
         * trying to reserve each and every page size.
         *
         * However, since size calculation will produce page-aligned sizes, it
         * makes sense to first try and see if we can reserve the entire memzone
         * in one contiguous chunk as well (otherwise we might end up wasting a
         * 1G page on a 10MB memzone). If we fail to get enough contiguous
         * memory, then we'll go and reserve space page-by-page.
         */
        no_pageshift = no_contig || rte_eal_iova_mode() == RTE_IOVA_VA;
        try_contig = !no_contig && !no_pageshift && rte_eal_has_hugepages();

        if (no_pageshift) {
                pg_sz = 0;
                pg_shift = 0;
        } else if (try_contig) {
                pg_sz = get_min_page_size();
                pg_shift = rte_bsf32(pg_sz);
        } else {
                pg_sz = getpagesize();
                pg_shift = rte_bsf32(pg_sz);
        }

        for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
                size_t min_chunk_size;
                unsigned int flags;

                if (try_contig || no_pageshift)
                        mem_size = rte_mempool_ops_calc_mem_size(mp, n,
                                        0, &min_chunk_size, &align);
                else
                        mem_size = rte_mempool_ops_calc_mem_size(mp, n,
                                        pg_shift, &min_chunk_size, &align);

                if (mem_size < 0) {
                        ret = mem_size;
                        goto fail;
                }

                ret = snprintf(mz_name, sizeof(mz_name),
                        RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
                if (ret < 0 || ret >= (int)sizeof(mz_name)) {
                        ret = -ENAMETOOLONG;
                        goto fail;
                }

                flags = mz_flags;

                /* if we're trying to reserve contiguous memory, add appropriate
                 * memzone flag.
                 */
                if (try_contig)
                        flags |= RTE_MEMZONE_IOVA_CONTIG;

                mz = rte_memzone_reserve_aligned(mz_name, mem_size,
                                mp->socket_id, flags, align);

                /* if we were trying to allocate contiguous memory, failed and
                 * minimum required contiguous chunk fits minimum page, adjust
                 * memzone size to the page size, and try again.
                 */
                if (mz == NULL && try_contig && min_chunk_size <= pg_sz) {
                        try_contig = false;
                        flags &= ~RTE_MEMZONE_IOVA_CONTIG;

                        mem_size = rte_mempool_ops_calc_mem_size(mp, n,
                                        pg_shift, &min_chunk_size, &align);
                        if (mem_size < 0) {
                                ret = mem_size;
                                goto fail;
                        }

                        mz = rte_memzone_reserve_aligned(mz_name, mem_size,
                                mp->socket_id, flags, align);
                }
                /* don't try reserving with 0 size if we were asked to reserve
                 * IOVA-contiguous memory.
                 */
                if (min_chunk_size < (size_t)mem_size && mz == NULL) {
                        /* not enough memory, retry with the biggest zone we
                         * have
                         */
                        mz = rte_memzone_reserve_aligned(mz_name, 0,
                                        mp->socket_id, flags,
                                        RTE_MAX(pg_sz, align));
                }
                if (mz == NULL) {
                        ret = -rte_errno;
                        goto fail;
                }

                if (mz->len < min_chunk_size) {
                        rte_memzone_free(mz);
                        ret = -ENOMEM;
                        goto fail;
                }

                if (no_contig)
                        iova = RTE_BAD_IOVA;
                else
                        iova = mz->iova;

                if (no_pageshift || try_contig)
                        ret = rte_mempool_populate_iova(mp, mz->addr,
                                iova, mz->len,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
                else
                        ret = rte_mempool_populate_virt(mp, mz->addr,
                                RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
                if (ret < 0) {
                        rte_memzone_free(mz);
                        goto fail;
                }
        }

        return mp->size;

fail:
        rte_mempool_free_memchunks(mp);
        return ret;
}

/* return the memory size required for mempool objects in anonymous mem */
static ssize_t
get_anon_size(const struct rte_mempool *mp)
{
        ssize_t size;
        size_t pg_sz, pg_shift;
        size_t min_chunk_size;
        size_t align;

        pg_sz = getpagesize();
        pg_shift = rte_bsf32(pg_sz);
        size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
                                             &min_chunk_size, &align);

        return size;
}

/* unmap a memory zone mapped by rte_mempool_populate_anon() */
static void
rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
        void *opaque)
{
        ssize_t size;

        /*
         * Calculate size since memhdr->len has contiguous chunk length
         * which may be smaller if anon map is split into many contiguous
         * chunks. Result must be the same as we calculated on populate.
         */
        size = get_anon_size(memhdr->mp);
        if (size < 0)
                return;

        munmap(opaque, size);
}

/* populate the mempool with an anonymous mapping */
int
rte_mempool_populate_anon(struct rte_mempool *mp)
{
        ssize_t size;
        int ret;
        char *addr;

        /* mempool is already populated, error */
        if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
                rte_errno = EINVAL;
                return 0;
        }

        ret = mempool_ops_alloc_once(mp);
        if (ret != 0)
                return ret;

        size = get_anon_size(mp);
        if (size < 0) {
                rte_errno = -size;
                return 0;
        }

        /* get chunk of virtually continuous memory */
        addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
                rte_errno = errno;
                return 0;
        }
        /* can't use MMAP_LOCKED, it does not exist on BSD */
        if (mlock(addr, size) < 0) {
                rte_errno = errno;
                munmap(addr, size);
                return 0;
        }

        ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
                rte_mempool_memchunk_anon_free, addr);
        if (ret == 0)
                goto fail;

        return mp->populated_size;

fail:
        rte_mempool_free_memchunks(mp);
        return 0;
}

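/*
 * Usage sketch (illustrative): populating a pool created with
 * rte_mempool_create_empty() from an anonymous mapping instead of the
 * default memzone-backed path. The pool name is hypothetical.
 *
 *        struct rte_mempool *mp;
 *
 *        mp = rte_mempool_create_empty("anon_pool", 4096, 2048, 64, 0,
 *                                      rte_socket_id(), 0);
 *        if (mp == NULL)
 *                return;
 *        rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
 *        if (rte_mempool_populate_anon(mp) == 0) {
 *                // population failed, rte_errno is set
 *                rte_mempool_free(mp);
 *        }
 */
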
/* free a mempool */
void
rte_mempool_free(struct rte_mempool *mp)
{
        struct rte_mempool_list *mempool_list = NULL;
        struct rte_tailq_entry *te;

        if (mp == NULL)
                return;

        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
        /* find out tailq entry */
        TAILQ_FOREACH(te, mempool_list, next) {
                if (te->data == (void *)mp)
                        break;
        }

        if (te != NULL) {
                TAILQ_REMOVE(mempool_list, te, next);
                rte_free(te);
        }
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_mempool_free_memchunks(mp);
        rte_mempool_ops_free(mp);
        rte_memzone_free(mp->mz);
}

/* initialize a mempool cache with the given size */
static void
mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
{
        cache->size = size;
        cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
        cache->len = 0;
}

/*
 * Create and initialize a cache for objects that are retrieved from and
 * returned to an underlying mempool. This structure is identical to the
 * local_cache[lcore_id] pointed to by the mempool structure.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id)
{
        struct rte_mempool_cache *cache;

        if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
                rte_errno = EINVAL;
                return NULL;
        }

        cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (cache == NULL) {
                RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
                rte_errno = ENOMEM;
                return NULL;
        }

        mempool_cache_init(cache, size);

        return cache;
}

/*
 * Free a cache. It's the responsibility of the user to make sure that any
 * remaining objects in the cache are flushed to the corresponding
 * mempool.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache)
{
        rte_free(cache);
}

/* create an empty mempool */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
        unsigned cache_size, unsigned private_data_size,
        int socket_id, unsigned flags)
{
        char mz_name[RTE_MEMZONE_NAMESIZE];
        struct rte_mempool_list *mempool_list;
        struct rte_mempool *mp = NULL;
        struct rte_tailq_entry *te = NULL;
        const struct rte_memzone *mz = NULL;
        size_t mempool_size;
        unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
        struct rte_mempool_objsz objsz;
        unsigned lcore_id;
        int ret;

        /* compilation-time checks */
        RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
                          RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
                          RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
                          RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
                          RTE_CACHE_LINE_MASK) != 0);
#endif

        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

        /* asked for zero items */
        if (n == 0) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* asked for cache too big */
        if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
            CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* "no cache align" implies "no spread" */
        if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
                flags |= MEMPOOL_F_NO_SPREAD;

        /* calculate mempool object sizes. */
        if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
                rte_errno = EINVAL;
                return NULL;
        }

        rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);

        /*
         * reserve a memory zone for this mempool: private data is
         * cache-aligned
         */
        private_data_size = (private_data_size +
                             RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);

        /* try to allocate tailq entry */
        te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
                goto exit_unlock;
        }

        mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
        mempool_size += private_data_size;
        mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);

        ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
        if (ret < 0 || ret >= (int)sizeof(mz_name)) {
                rte_errno = ENAMETOOLONG;
                goto exit_unlock;
        }

        mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
        if (mz == NULL)
                goto exit_unlock;

        /* init the mempool structure */
        mp = mz->addr;
        memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size));
        ret = snprintf(mp->name, sizeof(mp->name), "%s", name);
        if (ret < 0 || ret >= (int)sizeof(mp->name)) {
                rte_errno = ENAMETOOLONG;
                goto exit_unlock;
        }
        mp->mz = mz;
        mp->size = n;
        mp->flags = flags;
        mp->socket_id = socket_id;
        mp->elt_size = objsz.elt_size;
        mp->header_size = objsz.header_size;
        mp->trailer_size = objsz.trailer_size;
        /* Size of default caches, zero means disabled. */
        mp->cache_size = cache_size;
        mp->private_data_size = private_data_size;
        STAILQ_INIT(&mp->elt_list);
        STAILQ_INIT(&mp->mem_list);

        /*
         * local_cache pointer is set even if cache_size is zero.
         * The local_cache points to just past the elt_pa[] array.
         */
        mp->local_cache = (struct rte_mempool_cache *)
                RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));

        /* Init all default caches. */
        if (cache_size != 0) {
                for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
                        mempool_cache_init(&mp->local_cache[lcore_id],
                                           cache_size);
        }

        te->data = mp;

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_INSERT_TAIL(mempool_list, te, next);
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
        rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);

        return mp;

exit_unlock:
        rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
        rte_free(te);
        rte_mempool_free(mp);
        return NULL;
}

/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
        unsigned cache_size, unsigned private_data_size,
        rte_mempool_ctor_t *mp_init, void *mp_init_arg,
        rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
        int socket_id, unsigned flags)
{
        int ret;
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
                private_data_size, socket_id, flags);
        if (mp == NULL)
                return NULL;

        /*
         * Since we have 4 combinations of the SP/SC/MP/MC, examine the flags to
         * set the correct index into the table of ops structs.
         */
        if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
                ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
        else if (flags & MEMPOOL_F_SP_PUT)
                ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
        else if (flags & MEMPOOL_F_SC_GET)
                ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
        else
                ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);

        if (ret)
                goto fail;

        /* call the mempool priv initializer */
        if (mp_init)
                mp_init(mp, mp_init_arg);

        if (rte_mempool_populate_default(mp) < 0)
                goto fail;

        /* call the object initializers */
        if (obj_init)
                rte_mempool_obj_iter(mp, obj_init, obj_init_arg);

        return mp;

fail:
        rte_mempool_free(mp);
        return NULL;
}

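/*
 * Usage sketch (illustrative): creating a pool of 8192 fixed-size
 * buffers with a 256-object per-lcore cache, then getting and putting a
 * single object. The pool name is hypothetical.
 *
 *        struct rte_mempool *mp;
 *        void *obj;
 *
 *        mp = rte_mempool_create("example_pool", 8192, 2048, 256, 0,
 *                                NULL, NULL, NULL, NULL,
 *                                rte_socket_id(), 0);
 *        if (mp == NULL)
 *                rte_exit(EXIT_FAILURE, "cannot create mempool\n");
 *        if (rte_mempool_get(mp, &obj) == 0)
 *                rte_mempool_put(mp, obj);
 */
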
/* Return the number of entries in the mempool */
unsigned int
rte_mempool_avail_count(const struct rte_mempool *mp)
{
        unsigned count;
        unsigned lcore_id;

        count = rte_mempool_ops_get_count(mp);

        if (mp->cache_size == 0)
                return count;

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
                count += mp->local_cache[lcore_id].len;

        /*
         * due to a race condition (access to len is not locked), the
         * total can be greater than size... so fix the result
         */
        if (count > mp->size)
                return mp->size;

        return count;
}

/* return the number of entries allocated from the mempool */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp)
{
        return mp->size - rte_mempool_avail_count(mp);
}

/* dump the cache status */
static unsigned
rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
{
        unsigned lcore_id;
        unsigned count = 0;
        unsigned cache_count;

        fprintf(f, "  internal cache infos:\n");
        fprintf(f, "    cache_size=%"PRIu32"\n", mp->cache_size);

        if (mp->cache_size == 0)
                return count;

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                cache_count = mp->local_cache[lcore_id].len;
                fprintf(f, "    cache_count[%u]=%"PRIu32"\n",
                        lcore_id, cache_count);
                count += cache_count;
        }
        fprintf(f, "    total_cache_count=%u\n", count);

        return count;
}

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/* check and update cookies or panic (internal) */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
        void * const *obj_table_const, unsigned n, int free)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        struct rte_mempool_objhdr *hdr;
        struct rte_mempool_objtlr *tlr;
        uint64_t cookie;
        void *tmp;
        void *obj;
        void **obj_table;

        /* Force to drop the "const" attribute. This is done only when
         * DEBUG is enabled */
        tmp = (void *) obj_table_const;
        obj_table = tmp;

        while (n--) {
                obj = obj_table[n];

                if (rte_mempool_from_obj(obj) != mp)
                        rte_panic("MEMPOOL: object is owned by another "
                                  "mempool\n");

                hdr = __mempool_get_header(obj);
                cookie = hdr->cookie;

                if (free == 0) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (put)\n");
                        }
                        hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
                } else if (free == 1) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (get)\n");
                        }
                        hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
                } else if (free == 2) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
                            cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (audit)\n");
                        }
                }
                tlr = __mempool_get_trailer(obj);
                cookie = tlr->cookie;
                if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
                        RTE_LOG(CRIT, MEMPOOL,
                                "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                obj, (const void *) mp, cookie);
                        rte_panic("MEMPOOL: bad trailer cookie\n");
                }
        }
#else
        RTE_SET_USED(mp);
        RTE_SET_USED(obj_table_const);
        RTE_SET_USED(n);
        RTE_SET_USED(free);
#endif
}

void
rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
        void * const *first_obj_table_const, unsigned int n, int free)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        struct rte_mempool_info info;
        const size_t total_elt_sz =
                mp->header_size + mp->elt_size + mp->trailer_size;
        unsigned int i, j;

        rte_mempool_ops_get_info(mp, &info);

        for (i = 0; i < n; ++i) {
                void *first_obj = first_obj_table_const[i];

                for (j = 0; j < info.contig_block_size; ++j) {
                        void *obj;

                        obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
                        rte_mempool_check_cookies(mp, &obj, 1, free);
                }
        }
#else
        RTE_SET_USED(mp);
        RTE_SET_USED(first_obj_table_const);
        RTE_SET_USED(n);
        RTE_SET_USED(free);
#endif
}

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
static void
mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
        void *obj, __rte_unused unsigned idx)
{
        __mempool_check_cookies(mp, &obj, 1, 2);
}

static void
mempool_audit_cookies(struct rte_mempool *mp)
{
        unsigned num;

        num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL);
        if (num != mp->size) {
                rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
                        "iterated only over %u elements\n",
                        mp, mp->size, num);
        }
}
#else
#define mempool_audit_cookies(mp) do {} while(0)
#endif

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic error "-Wcast-qual"
#endif

/* check cookies before and after objects */
static void
mempool_audit_cache(const struct rte_mempool *mp)
{
        /* check cache size consistency */
        unsigned lcore_id;

        if (mp->cache_size == 0)
                return;

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                const struct rte_mempool_cache *cache;

                cache = &mp->local_cache[lcore_id];
                if (cache->len > cache->flushthresh) {
                        RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
                                lcore_id);
                        rte_panic("MEMPOOL: invalid cache len\n");
                }
        }
}

/* check the consistency of mempool (size, cookies, ...) */
void
rte_mempool_audit(struct rte_mempool *mp)
{
        mempool_audit_cache(mp);
        mempool_audit_cookies(mp);

        /* For case where mempool DEBUG is not set, and cache size is 0 */
        RTE_SET_USED(mp);
}

/* dump the status of the mempool on the console */
void
rte_mempool_dump(FILE *f, struct rte_mempool *mp)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        struct rte_mempool_info info;
        struct rte_mempool_debug_stats sum;
        unsigned lcore_id;
#endif
        struct rte_mempool_memhdr *memhdr;
        unsigned common_count;
        unsigned cache_count;
        size_t mem_len = 0;

        RTE_ASSERT(f != NULL);
        RTE_ASSERT(mp != NULL);

        fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
        fprintf(f, "  flags=%x\n", mp->flags);
        fprintf(f, "  pool=%p\n", mp->pool_data);
        fprintf(f, "  iova=0x%" PRIx64 "\n", mp->mz->iova);
        fprintf(f, "  nb_mem_chunks=%u\n", mp->nb_mem_chunks);
        fprintf(f, "  size=%"PRIu32"\n", mp->size);
        fprintf(f, "  populated_size=%"PRIu32"\n", mp->populated_size);
        fprintf(f, "  header_size=%"PRIu32"\n", mp->header_size);
        fprintf(f, "  elt_size=%"PRIu32"\n", mp->elt_size);
        fprintf(f, "  trailer_size=%"PRIu32"\n", mp->trailer_size);
        fprintf(f, "  total_obj_size=%"PRIu32"\n",
               mp->header_size + mp->elt_size + mp->trailer_size);

        fprintf(f, "  private_data_size=%"PRIu32"\n", mp->private_data_size);

        STAILQ_FOREACH(memhdr, &mp->mem_list, next)
                mem_len += memhdr->len;
        if (mem_len != 0) {
                fprintf(f, "  avg bytes/object=%#Lf\n",
                        (long double)mem_len / mp->size);
        }

        cache_count = rte_mempool_dump_cache(f, mp);
        common_count = rte_mempool_ops_get_count(mp);
        if ((cache_count + common_count) > mp->size)
                common_count = mp->size - cache_count;
        fprintf(f, "  common_pool_count=%u\n", common_count);

        /* sum and dump statistics */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        rte_mempool_ops_get_info(mp, &info);
        memset(&sum, 0, sizeof(sum));
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                sum.put_bulk += mp->stats[lcore_id].put_bulk;
                sum.put_objs += mp->stats[lcore_id].put_objs;
                sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
                sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
                sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
                sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
                sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
                sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
        }
        fprintf(f, "  stats:\n");
        fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
        fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
        fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
        fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
        fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
        fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
        if (info.contig_block_size > 0) {
                fprintf(f, "    get_success_blks=%"PRIu64"\n",
                        sum.get_success_blks);
                fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
        }
#else
        fprintf(f, "  no statistics available\n");
#endif

        rte_mempool_audit(mp);
}

/* dump the status of all mempools on the console */
void
rte_mempool_list_dump(FILE *f)
{
        struct rte_mempool *mp = NULL;
        struct rte_tailq_entry *te;
        struct rte_mempool_list *mempool_list;

        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

        rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);

        TAILQ_FOREACH(te, mempool_list, next) {
                mp = (struct rte_mempool *) te->data;
                rte_mempool_dump(f, mp);
        }

        rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
}

/* search a mempool from its name */
struct rte_mempool *
rte_mempool_lookup(const char *name)
{
        struct rte_mempool *mp = NULL;
        struct rte_tailq_entry *te;
        struct rte_mempool_list *mempool_list;

        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

        rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);

        TAILQ_FOREACH(te, mempool_list, next) {
                mp = (struct rte_mempool *) te->data;
                if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
                        break;
        }

        rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        return mp;
}

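/*
 * Usage sketch (illustrative): looking up a pool created elsewhere (for
 * example by a primary process) under a known, hypothetical name.
 *
 *        struct rte_mempool *mp = rte_mempool_lookup("example_pool");
 *
 *        if (mp == NULL)
 *                printf("pool not found: %s\n", rte_strerror(rte_errno));
 */
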
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
                      void *arg)
{
        struct rte_tailq_entry *te = NULL;
        struct rte_mempool_list *mempool_list;
        void *tmp_te;

        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

        rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);

        TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
                (*func)((struct rte_mempool *) te->data, arg);
        }

        rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
}
