/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_MEMORY_H_
#define _RTE_MEMORY_H_

/**
 * @file
 *
 * Memory-related RTE API.
 */

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_compat.h>
#include <rte_config.h>

/* forward declaration for pointers */
struct rte_memseg_list;

__extension__
enum rte_page_sizes {
	RTE_PGSIZE_4K   = 1ULL << 12,
	RTE_PGSIZE_64K  = 1ULL << 16,
	RTE_PGSIZE_256K = 1ULL << 18,
	RTE_PGSIZE_2M   = 1ULL << 21,
	RTE_PGSIZE_16M  = 1ULL << 24,
	RTE_PGSIZE_256M = 1ULL << 28,
	RTE_PGSIZE_512M = 1ULL << 29,
	RTE_PGSIZE_1G   = 1ULL << 30,
	RTE_PGSIZE_4G   = 1ULL << 32,
	RTE_PGSIZE_16G  = 1ULL << 34,
};

#define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */

#define RTE_CACHE_LINE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
/**< Return the first cache-aligned value greater than or equal to size. */

/** Cache line size in terms of log2. */
#if RTE_CACHE_LINE_SIZE == 64
#define RTE_CACHE_LINE_SIZE_LOG2 6
#elif RTE_CACHE_LINE_SIZE == 128
#define RTE_CACHE_LINE_SIZE_LOG2 7
#else
#error "Unsupported cache line size"
#endif

#define RTE_CACHE_LINE_MIN_SIZE 64	/**< Minimum cache line size. */

/**
 * Force alignment to cache line.
 */
#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)

/**
 * Force minimum cache line alignment.
 */
#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)

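/*
 * Example: a minimal sketch of how these helpers are commonly combined; the
 * struct and the size used below are illustrative, not part of this header.
 *
 *	// Give each lcore its own cache line to avoid false sharing.
 *	struct lcore_stats {
 *		uint64_t rx_packets;
 *		uint64_t tx_packets;
 *	} __rte_cache_aligned;
 *
 *	// With 64-byte cache lines, RTE_CACHE_LINE_ROUNDUP(100) yields 128.
 *	size_t padded_len = RTE_CACHE_LINE_ROUNDUP(100);
 */
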
typedef uint64_t phys_addr_t; /**< Physical address. */
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
/**
 * IO virtual address type.
 * When the physical addressing mode (IOVA as PA) is in use,
 * the translation from an IO virtual address (IOVA) to a physical address
 * is a direct mapping, i.e. the same value.
 * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation.
 */
typedef uint64_t rte_iova_t;
#define RTE_BAD_IOVA ((rte_iova_t)-1)

/**
 * Physical memory segment descriptor.
 */
#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
/**< Prevent this segment from being freed back to the OS. */
struct rte_memseg {
	RTE_STD_C11
	union {
		phys_addr_t phys_addr; /**< deprecated - Start physical address. */
		rte_iova_t iova;       /**< Start IO address. */
	};
	RTE_STD_C11
	union {
		void *addr;        /**< Start virtual address. */
		uint64_t addr_64;  /**< Makes sure addr is always 64 bits. */
	};
	size_t len;            /**< Length of the segment. */
	uint64_t hugepage_sz;  /**< The page size of underlying memory. */
	int32_t socket_id;     /**< NUMA socket ID. */
	uint32_t nchannel;     /**< Number of channels. */
	uint32_t nrank;        /**< Number of ranks. */
	uint32_t flags;        /**< Memseg-specific flags. */
} __rte_packed;

/**
 * Lock page in physical memory and prevent it from being swapped.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   0 on success, negative on error.
 */
int rte_mem_lock_page(const void *virt);

/**
 * Get physical address of any mapped virtual address in the current process.
 * It is found by browsing the /proc/self/pagemap special file.
 * The page must be locked.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The physical address or RTE_BAD_IOVA on error.
 */
phys_addr_t rte_mem_virt2phy(const void *virt);

/**
 * Get IO virtual address of any mapped virtual address in the current process.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The IO address or RTE_BAD_IOVA on error.
 */
rte_iova_t rte_mem_virt2iova(const void *virt);

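/*
 * Example: a minimal sketch of the address lookups above. Locking the page
 * first is required for rte_mem_virt2phy(); the buffer here is illustrative
 * and assumed to be mapped by this process.
 *
 *	char page[4096];
 *	void *buf = page;
 *
 *	if (rte_mem_lock_page(buf) == 0) {
 *		phys_addr_t pa = rte_mem_virt2phy(buf);
 *		rte_iova_t iova = rte_mem_virt2iova(buf);
 *
 *		// In IOVA-as-PA mode both values match; either lookup may
 *		// return RTE_BAD_IOVA if the address cannot be resolved.
 *		if (pa == RTE_BAD_IOVA || iova == RTE_BAD_IOVA)
 *			; // handle unresolvable address
 *	}
 */
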
/**
 * Get virtual memory address corresponding to iova address.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param iova
 *   The iova address.
 * @return
 *   Virtual address corresponding to iova address (or NULL if address does not
 *   exist within DPDK memory map).
 */
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova);

/**
 * Get memseg to which a particular virtual address belongs.
 *
 * @param virt
 *   The virtual address.
 * @param msl
 *   The memseg list in which to look up based on ``virt`` address
 *   (can be NULL).
 * @return
 *   Memseg pointer on success, or NULL on error.
 */
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);

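/*
 * Example: a minimal sketch of resolving a pointer back to its segment
 * metadata; passing NULL as ``msl`` scans all memseg lists. ``buf`` is
 * assumed to point into DPDK-managed memory.
 *
 *	struct rte_memseg *ms = rte_mem_virt2memseg(buf, NULL);
 *
 *	if (ms != NULL)
 *		printf("page size %llu, socket %d\n",
 *			(unsigned long long)ms->hugepage_sz, ms->socket_id);
 */
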
/**
 * Get memseg list corresponding to virtual memory address.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   Memseg list to which this virtual address belongs.
 */
__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *virt);

/**
 * Memseg walk function prototype.
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, void *arg);

/**
 * Memseg contig walk function prototype. This will trigger a callback on every
 * VA-contiguous area starting at memseg ``ms``, so total valid VA space at each
 * callback call will be [``ms->addr``, ``ms->addr + len``).
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg);

/**
 * Memseg list walk function prototype. This will trigger a callback on every
 * allocated memseg list.
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_list_walk_t)(const struct rte_memseg_list *msl,
		void *arg);

/**
 * Walk list of all memsegs.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg);

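/*
 * Example: a minimal sketch of a walk callback that sums the length of all
 * memsegs; the callback name is illustrative.
 *
 *	static int
 *	sum_len(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms, void *arg)
 *	{
 *		size_t *total = arg;
 *
 *		*total += ms->len;
 *		return 0; // 0 == keep walking
 *	}
 *
 *	size_t total = 0;
 *
 *	if (rte_memseg_walk(sum_len, &total) < 0)
 *		; // a callback reported an error
 */
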
/**
 * Walk each VA-contiguous area.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg);

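/*
 * Example: a minimal sketch using the contiguous walk to find the largest
 * VA-contiguous area; note the extra ``len`` parameter, which covers the
 * whole area rather than a single memseg. The callback name is illustrative.
 *
 *	static int
 *	find_max(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused,
 *			size_t len, void *arg)
 *	{
 *		size_t *max = arg;
 *
 *		if (len > *max)
 *			*max = len;
 *		return 0;
 *	}
 *
 *	size_t max_contig = 0;
 *	rte_memseg_contig_walk(find_max, &max_contig);
 */
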
/**
 * Walk each allocated memseg list.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);

/**
 * Walk list of all memsegs without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);

/**
 * Walk each VA-contiguous area without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);

/**
 * Walk each allocated memseg list without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int __rte_experimental
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg);

/**
 * Return file descriptor associated with a particular memseg (if available).
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This returns an internal file descriptor. Performing any operations on
 *       this file descriptor is inherently dangerous, so it should be treated
 *       as read-only for all intents and purposes.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 *
 * @return
 *   Valid file descriptor in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int __rte_experimental
rte_memseg_get_fd(const struct rte_memseg *ms);

/**
 * Return file descriptor associated with a particular memseg (if available).
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @note This returns an internal file descriptor. Performing any operations on
 *       this file descriptor is inherently dangerous, so it should be treated
 *       as read-only for all intents and purposes.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 *
 * @return
 *   Valid file descriptor in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int __rte_experimental
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms);

/**
 * Get offset into segment file descriptor associated with a particular memseg
 * (if available).
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 * @param offset
 *   A pointer to offset value where the result will be stored.
 *
 * @return
 *   0 in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - EINVAL  - ``offset`` pointer was NULL
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int __rte_experimental
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset);

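/*
 * Example: a minimal sketch combining the fd and offset lookups, with the
 * error handling described above. ``ms`` is assumed to come from a prior
 * rte_mem_virt2memseg() call, and rte_errno requires <rte_errno.h>.
 *
 *	size_t offset;
 *	int fd = rte_memseg_get_fd(ms);
 *
 *	if (fd < 0) {
 *		if (rte_errno == ENOTSUP)
 *			; // segment fds unsupported here, an expected case
 *	} else if (rte_memseg_get_fd_offset(ms, &offset) == 0) {
 *		// (fd, offset) now locate this segment's backing file;
 *		// treat the fd as read-only.
 *	}
 */
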
/**
 * Get offset into segment file descriptor associated with a particular memseg
 * (if available).
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 * @param offset
 *   A pointer to offset value where the result will be stored.
 *
 * @return
 *   0 in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - EINVAL  - ``offset`` pointer was NULL
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int __rte_experimental
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Register external memory chunk with DPDK.
 *
 * @note Using this API is mutually exclusive with ``rte_malloc`` family of
 *       APIs.
 *
 * @note This API will not perform any DMA mapping. It is expected that the
 *       user will do that themselves.
 *
 * @note Before accessing this memory in other processes, it needs to be
 *       attached in each of those processes by calling ``rte_extmem_attach``.
 *
 * @param va_addr
 *   Start of virtual area to register. Must be aligned by ``page_sz``.
 * @param len
 *   Length of virtual area to register. Must be aligned by ``page_sz``.
 * @param iova_addrs
 *   Array of page IOVA addresses corresponding to each page in this memory
 *   area. Can be NULL, in which case page IOVA addresses will be set to
 *   RTE_BAD_IOVA.
 * @param n_pages
 *   Number of elements in the iova_addrs array. Ignored if ``iova_addrs``
 *   is NULL.
 * @param page_sz
 *   Page size of the underlying memory
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     EEXIST - memory chunk is already registered
 *     ENOSPC - no more space in internal config to store a new memory chunk
 */
int __rte_experimental
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz);

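/*
 * Example: a minimal sketch of registering an anonymous mapping as external
 * memory, leaving IOVAs unset (RTE_BAD_IOVA). DMA mapping, and attaching via
 * rte_extmem_attach() in other processes, remain the caller's responsibility.
 * mmap() requires <sys/mman.h>.
 *
 *	size_t len = RTE_PGSIZE_2M;
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (va != MAP_FAILED &&
 *			rte_extmem_register(va, len, NULL, 0, RTE_PGSIZE_4K) != 0)
 *		; // inspect rte_errno (EINVAL, EEXIST or ENOSPC)
 */
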
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Unregister external memory chunk with DPDK.
 *
 * @note Using this API is mutually exclusive with ``rte_malloc`` family of
 *       APIs.
 *
 * @note This API will not perform any DMA unmapping. It is expected that the
 *       user will do that themselves.
 *
 * @note Before calling this function, all other processes must call
 *       ``rte_extmem_detach`` to detach from the memory area.
 *
 * @param va_addr
 *   Start of virtual area to unregister
 * @param len
 *   Length of virtual area to unregister
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int __rte_experimental
rte_extmem_unregister(void *va_addr, size_t len);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Attach to external memory chunk registered in another process.
 *
 * @note Using this API is mutually exclusive with ``rte_malloc`` family of
 *       APIs.
 *
 * @note This API will not perform any DMA mapping. It is expected that the
 *       user will do that themselves.
 *
 * @param va_addr
 *   Start of virtual area to register
 * @param len
 *   Length of virtual area to register
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int __rte_experimental
rte_extmem_attach(void *va_addr, size_t len);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Detach from external memory chunk registered in another process.
 *
 * @note Using this API is mutually exclusive with ``rte_malloc`` family of
 *       APIs.
 *
 * @note This API will not perform any DMA unmapping. It is expected that the
 *       user will do that themselves.
 *
 * @param va_addr
 *   Start of virtual area to unregister
 * @param len
 *   Length of virtual area to unregister
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int __rte_experimental
rte_extmem_detach(void *va_addr, size_t len);

/**
 * Dump the physical memory layout to a file.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_dump_physmem_layout(FILE *f);

/**
 * Get the total amount of available physical memory.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @return
 *   The total amount of available physical memory in bytes.
 */
uint64_t rte_eal_get_physmem_size(void);

/**
 * Get the number of memory channels.
 *
 * @return
 *   The number of memory channels on the system. The value is 0 if unknown
 *   or not the same on all devices.
 */
unsigned rte_memory_get_nchannel(void);

/**
 * Get the number of memory ranks.
 *
 * @return
 *   The number of memory ranks on the system. The value is 0 if unknown or
 *   not the same on all devices.
 */
unsigned rte_memory_get_nrank(void);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Check if all currently allocated memory segments are compliant with
 * supplied DMA address width.
 *
 * @param maskbits
 *   Address width to check against.
 */
int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);

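/*
 * Example: a minimal sketch of checking memory against a 40-bit DMA mask, as
 * a device limited to 40-bit addressing would; a return of 0 is assumed to
 * mean all segments fit within the mask.
 *
 *	if (rte_mem_check_dma_mask(40) != 0)
 *		; // some allocated memory is out of this device's reach
 */
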
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Check if all currently allocated memory segments are compliant with
 * supplied DMA address width. This function uses
 * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk, so the
 * memory_hotplug_lock will not be acquired, avoiding deadlock during
 * memory initialization.
 *
 * This function is intended for internal use by the EAL core memory
 * subsystem. Drivers should use rte_mem_check_dma_mask instead.
 *
 * @param maskbits
 *   Address width to check against.
 */
int __rte_experimental rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Set the DMA mask to use once memory initialization is done. The functions
 * rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe cannot be
 * used safely until memory has been initialized.
 */
void __rte_experimental rte_mem_set_dma_mask(uint8_t maskbits);

/**
 * Drivers based on uio will not load unless physical
 * addresses are obtainable. It is only possible to get
 * physical addresses when running as a privileged user.
 *
 * @return
 *   1 if the system is able to obtain physical addresses.
 *   0 if using DMA addresses through an IOMMU.
 */
int rte_eal_using_phys_addrs(void);

/**
 * Enum indicating which kind of memory event has happened. Used by callbacks
 * to distinguish between memory allocations and deallocations.
 */
enum rte_mem_event {
	RTE_MEM_EVENT_ALLOC = 0, /**< Allocation event. */
	RTE_MEM_EVENT_FREE,      /**< Deallocation event. */
};
#define RTE_MEM_EVENT_CALLBACK_NAME_LEN 64
/**< maximum length of callback name */

/**
 * Function typedef used to register callbacks for memory events.
 */
typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type,
		const void *addr, size_t len, void *arg);

/**
 * Function used to register callbacks for memory events.
 *
 * @note callbacks will happen while memory hotplug subsystem is write-locked,
 *       therefore some functions (e.g. `rte_memseg_walk()`) will cause a
 *       deadlock when called from within such callbacks.
 *
 * @note mem event callbacks not being supported is an expected error condition,
 *       so user code needs to handle this situation. In these cases, return
 *       value will be -1, and rte_errno will be set to ENOTSUP.
 *
 * @param name
 *   Name associated with specified callback to be added to the list.
 *
 * @param clb
 *   Callback function pointer.
 *
 * @param arg
 *   Argument to pass to the callback.
 *
 * @return
 *   0 on successful callback register
 *   -1 on unsuccessful callback register, with rte_errno value indicating
 *   reason for failure.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg);

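/*
 * Example: a minimal sketch of a logging event callback. The hotplug lock is
 * write-held during the callback, so locking walks such as rte_memseg_walk()
 * must not be called from it. The callback and its name are illustrative.
 *
 *	static void
 *	mem_log(enum rte_mem_event type, const void *addr, size_t len,
 *			void *arg __rte_unused)
 *	{
 *		printf("%s: %p, %zu bytes\n",
 *			type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
 *			addr, len);
 *	}
 *
 *	if (rte_mem_event_callback_register("mem-log", mem_log, NULL) < 0 &&
 *			rte_errno == ENOTSUP)
 *		; // callbacks unsupported (e.g. legacy mode), handle gracefully
 */
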
/**
 * Function used to unregister callbacks for memory events.
 *
 * @param name
 *   Name associated with specified callback to be removed from the list.
 *
 * @param arg
 *   Argument to look for among callbacks with specified callback name.
 *
 * @return
 *   0 on successful callback unregister
 *   -1 on unsuccessful callback unregister, with rte_errno value indicating
 *   reason for failure.
 */
int __rte_experimental
rte_mem_event_callback_unregister(const char *name, void *arg);

#define RTE_MEM_ALLOC_VALIDATOR_NAME_LEN 64
/**< maximum length of alloc validator name */
/**
 * Function typedef used to register memory allocation validation callbacks.
 *
 * Returning 0 will allow allocation attempt to continue. Returning -1 will
 * prevent allocation from succeeding.
 */
typedef int (*rte_mem_alloc_validator_t)(int socket_id,
		size_t cur_limit, size_t new_len);

/**
 * @brief Register validator callback for memory allocations.
 *
 * Callbacks registered by this function will be called right before memory
 * allocator is about to trigger allocation of more pages from the system if
 * said allocation will bring total memory usage above specified limit on
 * specified socket. User will be able to cancel pending allocation if callback
 * returns -1.
 *
 * @note callbacks will happen while memory hotplug subsystem is write-locked,
 *       therefore some functions (e.g. `rte_memseg_walk()`) will cause a
 *       deadlock when called from within such callbacks.
 *
 * @note validator callbacks not being supported is an expected error condition,
 *       so user code needs to handle this situation. In these cases, return
 *       value will be -1, and rte_errno will be set to ENOTSUP.
 *
 * @param name
 *   Name associated with specified callback to be added to the list.
 *
 * @param clb
 *   Callback function pointer.
 *
 * @param socket_id
 *   Socket ID on which to watch for allocations.
 *
 * @param limit
 *   Limit above which to trigger callbacks.
 *
 * @return
 *   0 on successful callback register
 *   -1 on unsuccessful callback register, with rte_errno value indicating
 *   reason for failure.
 */
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit);

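/*
 * Example: a minimal sketch of a validator that caps socket 0 at 1 GiB. The
 * callback only runs when an allocation would push usage past the registered
 * limit; returning -1 cancels it. The callback and its name are illustrative.
 *
 *	static int
 *	cap_socket0(int socket_id __rte_unused, size_t cur_limit,
 *			size_t new_len)
 *	{
 *		return new_len > cur_limit ? -1 : 0;
 *	}
 *
 *	rte_mem_alloc_validator_register("cap-socket0", cap_socket0, 0,
 *			RTE_PGSIZE_1G);
 */
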
/**
 * @brief Unregister validator callback for memory allocations.
 *
 * @param name
 *   Name associated with specified callback to be removed from the list.
 *
 * @param socket_id
 *   Socket ID on which to watch for allocations.
 *
 * @return
 *   0 on successful callback unregister
 *   -1 on unsuccessful callback unregister, with rte_errno value indicating
 *   reason for failure.
 */
int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMORY_H_ */