/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright(c) 2013 6WIND.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/ioctl.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"

#define PFN_MASK_SIZE	8
#ifdef RTE_LIBRTE_XEN_DOM0
int rte_xen_dom0_supported(void)
{
	return internal_config.xen_dom0_support;
}
#endif
/*
 * Huge page mapping under linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */
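/*
 * Illustrative sketch (an assumption, not part of this file's logic): the
 * per-page mapping loop further below boils down to the following open+mmap
 * sequence. The "/mnt/huge" mount point and the 2 MB page size are
 * hypothetical, chosen for the example only.
 */
#if 0	/* example only, never compiled */
static void *
example_map_one_hugepage(void)
{
	int fd = open("/mnt/huge/example_page", O_CREAT | O_RDWR, 0600);
	void *va;

	if (fd < 0)
		return NULL;
	/* hugetlbfs rounds the mapping to the page size of the mount */
	va = mmap(NULL, RTE_PGSIZE_2M, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, 0);
	close(fd);
	return (va == MAP_FAILED) ? NULL : va;
}
#endif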
static uint64_t baseaddr_offset;

static bool phys_addrs_available = true;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
static void
test_phys_addrs_available(void)
{
	uint64_t tmp;
	phys_addr_t physaddr;

	/* For dom0, phys addresses can always be available */
	if (rte_xen_dom0_supported())
		return;

	physaddr = rte_mem_virt2phy(&tmp);
	if (physaddr == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(ERR, EAL,
			"Cannot obtain physical addresses: %s. "
			"Only vfio will function.\n",
			strerror(errno));
		phys_addrs_available = false;
	}
}
/* Lock page in physical memory and prevent from swapping. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd, retval;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* when using dom0, /proc/self/pagemap always returns 0, check in
	 * dpdk memory by browsing the memsegs */
	if (rte_xen_dom0_supported()) {
		struct rte_mem_config *mcfg;
		struct rte_memseg *memseg;
		unsigned i;

		mcfg = rte_eal_get_configuration()->mem_config;
		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			memseg = &mcfg->memseg[i];
			if (memseg->addr == NULL)
				break;
			if (virtaddr > memseg->addr &&
					virtaddr < RTE_PTR_ADD(memseg->addr,
						memseg->len))
				return memseg->phys_addr +
					RTE_PTR_DIFF(virtaddr, memseg->addr);
		}

		return RTE_BAD_PHYS_ADDR;
	}

	/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
	if (!phys_addrs_available)
		return RTE_BAD_PHYS_ADDR;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		close(fd);
		return RTE_BAD_PHYS_ADDR;
	}

	retval = read(fd, &page, PFN_MASK_SIZE);
	close(fd);
	if (retval < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	} else if (retval != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
				"but expected %d:\n",
				__func__, retval, PFN_MASK_SIZE);
		return RTE_BAD_PHYS_ADDR;
	}

	/*
	 * the pfn (page frame number) are bits 0-54 (see
	 * pagemap.txt in linux Documentation)
	 */
	if ((page & 0x7fffffffffffffULL) == 0)
		return RTE_BAD_PHYS_ADDR;

	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}
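/*
 * Usage sketch (hypothetical caller, for illustration only): translate the
 * address of a local object and handle failure. RTE_BAD_PHYS_ADDR is
 * returned both when pagemap is unreadable and on parse errors.
 */
#if 0	/* example only, never compiled */
static void
example_virt2phy(void)
{
	static uint64_t buf;
	phys_addr_t pa = rte_mem_virt2phy(&buf);

	if (pa == RTE_BAD_PHYS_ADDR)
		RTE_LOG(DEBUG, EAL, "no physical address for %p\n", &buf);
}
#endif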
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
 */
static int
set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	static phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		hugepg_tbl[i].physaddr = addr;
		addr += hugepg_tbl[i].size;
	}
	return 0;
}
/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes mapping data to the same virtual address
 * Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	if (retval == 0)
		return -EIO;
	switch (c) {
		case '0' : return 0;
		case '1' : return 1;
		case '2' : return 2;
		default: return -EINVAL;
	}
}
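/*
 * Note: the kernel defines 0 = no randomization, 1 = randomize stack,
 * VDSO and mmap base, 2 = additionally randomize the heap (see
 * Documentation/sysctl/kernel.txt in the kernel tree).
 */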
/*
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */
static void *
get_virtual_area(size_t *size, size_t hugepage_sz)
{
	void *addr;
	int fd;
	long aligned_addr;

	if (internal_config.base_virtaddr != 0) {
		addr = (void*) (uintptr_t) (internal_config.base_virtaddr +
				baseaddr_offset);
	}
	else addr = NULL;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
		return NULL;
	}
	do {
		addr = mmap(addr,
				(*size) + hugepage_sz, PROT_READ,
#ifdef RTE_ARCH_PPC_64
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
#else
				MAP_PRIVATE,
#endif
				fd, 0);
		if (addr == MAP_FAILED)
			*size -= hugepage_sz;
	} while (addr == MAP_FAILED && *size > 0);

	if (addr == MAP_FAILED) {
		close(fd);
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		return NULL;
	}

	munmap(addr, (*size) + hugepage_sz);
	close(fd);

	/* align addr to a huge page size boundary */
	aligned_addr = (long)addr;
	aligned_addr += (hugepage_sz - 1);
	aligned_addr &= (~(hugepage_sz - 1));
	addr = (void *)(aligned_addr);

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		addr, *size);

	/* increment offset */
	baseaddr_offset += *size;

	return addr;
}
static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put setjmp into a wrap method to avoid compiling error. Any non-volatile,
 * non-static local variable in the stack frame calling sigsetjmp might be
 * clobbered by a call to longjmp.
 */
static int huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
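/*
 * Intended call pattern (sketch): arm the jump buffer, then touch the
 * freshly mapped page; a SIGBUS raised by the first write lands back in
 * huge_wrap_sigsetjmp() with a non-zero return value. map_all_hugepages()
 * below uses exactly this sequence:
 *
 *	if (huge_wrap_sigsetjmp()) {
 *		... SIGBUS happened, clean up the mapping ...
 *	}
 *	*(int *)virtaddr = 0;	// first touch may fault
 */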
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_64
		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
		 * original map address as final map address.
		 */
		else if ((hugepage_sz == RTE_PGSIZE_1G)
			|| (hugepage_sz == RTE_PGSIZE_16G)) {
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0] ; j++) {
#ifdef RTE_ARCH_PPC_64
				/* The physical addresses are sorted in
				 * descending order on PPC64 */
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr - hugepage_sz)
					break;
#else
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
#endif
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return i;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros */
		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			return i;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
		}
		else {
			hugepg_tbl[i].final_va = virtaddr;
		}

		if (orig) {
			/* In linux, hugetlb limitations, like cgroup, are
			 * enforced at fault time instead of mmap(), even
			 * with the option of MAP_POPULATE. Kernel will send
			 * a SIGBUS signal. To avoid to be killed, save stack
			 * environment here, if SIGBUS happens, we can jump
			 * back here.
			 */
			if (huge_wrap_sigsetjmp()) {
				RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
					"hugepages of size %u MB\n",
					(unsigned)(hugepage_sz / 0x100000));
				munmap(virtaddr, hugepage_sz);
				close(fd);
				unlink(hugepg_tbl[i].filepath);
				return i;
			}
			*(int *)virtaddr = 0;
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
				__func__, strerror(errno));
			close(fd);
			return i;
		}

		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}

	return i;
}
/* Unmap all hugepages from original mapping */
static int
unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		if (hugepg_tbl[i].orig_va) {
			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
			hugepg_tbl[i].orig_va = NULL;
		}
	}
	return 0;
}
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
				" consider that all memory is in socket_id 0\n");
		return 0;
	}

	snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);

	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;
#else
	/* PowerPC needs memory sorted in reverse order from x86 */
	const struct hugepage_file *p1 = b;
	const struct hugepage_file *p2 = a;
#endif
	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	else
		return 0;
}
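/*
 * cmp_physaddr() is meant as a comparator for qsort() over an array of
 * struct hugepage_file, as done in rte_eal_hugepage_init() below, e.g.:
 *
 *	qsort(tbl, nb_pages, sizeof(struct hugepage_file), cmp_physaddr);
 */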
/*
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return retval;
}
/*
 * this copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
		const struct hugepage_file * src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].final_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}
static int
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages +=
			internal_config.hugepage_info[size].num_pages[socket];

	for (page = 0; page < nrpages; page++) {
		struct hugepage_file *hp = &hugepg_tbl[page];

		if (hp->final_va != NULL && unlink(hp->filepath)) {
			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
				__func__, hp->filepath, strerror(errno));
		}
	}
	return 0;
}
/*
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

						unmap_len = hp->size;

						/* get start addr and len of the remaining segment */
						munmap(hp->final_va, (size_t) unmap_len);

						hp->final_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
									__func__, hp->filepath, strerror(errno));
							return -1;
						}
					}
					/* lock the page and skip */
					else
						pages_found++;

				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}
static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++){
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL)
			size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}
/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> ram
 */
static int
calc_num_pages_per_socket(uint64_t * memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size, total_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected sockets according
		 * to number of cores from cpu mask present on each socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting all
		 * available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
			hp_used[i].hugedir = hp_info[i].hugedir;
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size, if so
			 * move on to next size */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages available that are
			 * bigger than the memory we want, so lets see if we can get enough
			 * from other page sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
				hp_info[j].num_pages[socket];

			/* is there enough other memory, if not allocate another page and quit */
			if (remaining_mem < memory[socket]){
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket*/
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}
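/*
 * Worked example (numbers are assumptions): with "-m 2048" and four lcores,
 * three of them on socket 0, the auto-spread step above assigns socket 0
 * 2048 * 3 / 4 = 1536 MB and socket 1 2048 * 1 / 4 = 512 MB, each clamped
 * by get_socket_mem_size(). With 2 MB pages this becomes requests for 768
 * and 256 pages respectively.
 */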
static size_t
eal_get_hugepage_mem_size(void)
{
	uint64_t size = 0;
	unsigned i, j;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL) {
			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
				size += hpi->hugepage_sz * hpi->num_pages[j];
			}
		}
	}

	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}
static struct sigaction huge_action_old;
static int huge_need_recover;

static void
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
/*
 * Prepare physical memory mapping: fill configuration structure with
 * these infos, return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
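/*
 * Worked example for steps 4-7 (addresses are assumptions): three 2 MB
 * pages with physical addresses 0x40000000, 0x40200000 and 0x40400000
 * that are also virtually adjacent after the second mapping collapse into
 * a single memseg with len = 6 MB; a gap in either the physical or the
 * virtual chain starts a new memseg instead.
 */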
int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];

	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j, new_memseg;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;

	test_phys_addrs_available();

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = 0;
		return 0;
	}

	/* check if app runs on Xen Dom0 */
	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		/* use dom0_mm kernel driver to init memory */
		if (rte_xen_dom0_memory_init() < 0)
			return -1;
		else
			return 0;
#endif
	}

	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

	hp_offset = 0; /* where we start the current page size entries */

	huge_register_sigbus();

	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++){
		unsigned pages_old, pages_new;
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		pages_old = hpi->num_pages[0];
		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
		if (pages_new < pages_old) {
			RTE_LOG(DEBUG, EAL,
				"%d not %d hugepages of size %u MB allocated\n",
				pages_new, pages_old,
				(unsigned)(hpi->hugepage_sz / 0x100000));

			int pages = pages_old - pages_new;

			nr_hugepages -= pages;
			hpi->num_pages[0] = pages_new;
			if (pages_new == 0)
				continue;
		}

		if (phys_addrs_available) {
			/* find physical addresses for each hugepage */
			if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		} else {
			/* set physical addresses for each hugepage */
			if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
		      sizeof(struct hugepage_file), cmp_physaddr);

		/* remap all hugepages */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
		    hpi->num_pages[0]) {
			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* unmap original mappings */
		if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
	}

	huge_recover_sigbus();

	if (internal_config.memory == 0 && internal_config.force_sockets == 0)
		internal_config.memory = eal_get_hugepage_mem_size();

	nr_hugefiles = nr_hugepages;

	/* clean out the numbers of pages */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
			internal_config.hugepage_info[i].num_pages[j] = 0;

	/* get hugepages for each socket */
	for (i = 0; i < nr_hugefiles; i++) {
		int socket = tmp_hp[i].socket_id;

		/* find a hugepage info with right size and increment num_pages */
		const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
				(int)internal_config.num_hugepage_sizes);
		for (j = 0; j < nb_hpsizes; j++) {
			if (tmp_hp[i].size ==
					internal_config.hugepage_info[j].hugepage_sz) {
				internal_config.hugepage_info[j].num_pages[socket]++;
			}
		}
	}

	/* make a copy of socket_mem, needed for number of pages calculation */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];

	/* calculate final number of pages */
	nr_hugepages = calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes);

	/* error if not enough memory available */
	if (nr_hugepages < 0)
		goto fail;

	/* reporting in! */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
			if (used_hp[i].num_pages[j] > 0) {
				RTE_LOG(DEBUG, EAL,
					"Requesting %u pages of size %uMB"
					" from socket %i\n",
					used_hp[i].num_pages[j],
					(unsigned)
					(used_hp[i].hugepage_sz / 0x100000),
					j);
			}
		}
	}

	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_info_path(),
			nr_hugefiles * sizeof(struct hugepage_file));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}
	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy stuff from malloc'd hugepage* to the actual shared memory.
	 * this procedure only copies those hugepages that have final_va
	 * not NULL. has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
			tmp_hp, nr_hugefiles) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

	/* free the hugepage backing files */
	if (internal_config.hugepage_unlink &&
		unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;

	/* first memseg index shall be 0 after incrementing it below */
	j = -1;
	for (i = 0; i < nr_hugefiles; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
		/* On PPC64 architecture, the mmap always start from higher
		 * virtual address to lower address. Here, both the physical
		 * address and virtual address are in descending order */
		else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i-1].final_va -
		    (unsigned long)hugepage[i].final_va) != hugepage[i].size)
			new_memseg = 1;
#else
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;
#endif

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
			mcfg->memseg[j].len = hugepage[i].size;
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
#ifdef RTE_ARCH_PPC_64
			/* Use the phy and virt address of the last page as segment
			 * address for IBM Power architecture */
			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
#endif
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}

	if (i < nr_hugefiles) {
		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
			"from %d requested\n"
			"Current %s=%d is not enough\n"
			"Please either increase it or request less amount "
			"of memory.\n",
			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
			RTE_MAX_MEMSEG);
		goto fail;
	}

	munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return 0;

fail:
	huge_recover_sigbus();
	free(tmp_hp);
	if (hugepage != NULL)
		munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return -1;
}
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
getFileSize(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/*
 * This creates the memory mappings in the secondary process to match that of
 * the server process. It goes through each memory segment in the DPDK runtime
 * configuration and finds the hugepages which form that segment, mapping them
 * in order to form a contiguous block in the virtual memory space
 */
int
rte_eal_hugepage_attach(void)
{
	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct hugepage_file *hp = NULL;
	unsigned num_hp = 0;
	unsigned i, s = 0; /* s used to track the segment number */
	unsigned max_seg = RTE_MAX_MEMSEG;
	off_t size = 0;
	int fd, fd_zero = -1, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	test_phys_addrs_available();

	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		if (rte_xen_dom0_memory_attach() < 0) {
			RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
					"process\n");
			return -1;
		}
		return 0;
#endif
	}

	fd_zero = open("/dev/zero", O_RDONLY);
	if (fd_zero < 0) {
		RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
		goto error;
	}
	fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		goto error;
	}

	/* map all segments into memory to make sure we get the addrs */
	for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
		void *base_addr;

		/*
		 * the first memory segment with len==0 is the one that
		 * follows the last valid segment.
		 */
		if (mcfg->memseg[s].len == 0)
			break;

		/*
		 * fdzero is mmapped to get a contiguous block of virtual
		 * addresses of the appropriate memseg size.
		 * use mmap to get identical addresses as the primary process.
		 */
		base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
				 PROT_READ,
#ifdef RTE_ARCH_PPC_64
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
#else
				 MAP_PRIVATE,
#endif
				 fd_zero, 0);
		if (base_addr == MAP_FAILED ||
		    base_addr != mcfg->memseg[s].addr) {
			max_seg = s;
			if (base_addr != MAP_FAILED) {
				/* errno is stale, don't use */
				RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
					"in /dev/zero at [%p], got [%p] - "
					"please use '--base-virtaddr' option\n",
					(unsigned long long)mcfg->memseg[s].len,
					mcfg->memseg[s].addr, base_addr);
				munmap(base_addr, mcfg->memseg[s].len);
			} else {
				RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
					"in /dev/zero at [%p]: '%s'\n",
					(unsigned long long)mcfg->memseg[s].len,
					mcfg->memseg[s].addr, strerror(errno));
			}
			if (aslr_enabled() > 0) {
				RTE_LOG(ERR, EAL, "It is recommended to "
					"disable ASLR in the kernel "
					"and retry running both primary "
					"and secondary processes\n");
			}
			goto error;
		}
	}

	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage_file);
	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

	s = 0;
	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
		void *addr, *base_addr;
		uintptr_t offset = 0;
		size_t mapping_size;
		/*
		 * free previously mapped memory so we can map the
		 * hugepages into the space
		 */
		base_addr = mcfg->memseg[s].addr;
		munmap(base_addr, mcfg->memseg[s].len);

		/* find the hugepages for this segment and map them
		 * we don't need to worry about order, as the server sorted the
		 * entries before it did the second mmap of them */
		for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
			if (hp[i].memseg_id == (int)s){
				fd = open(hp[i].filepath, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n",
						hp[i].filepath);
					goto error;
				}
				mapping_size = hp[i].size;
				addr = mmap(RTE_PTR_ADD(base_addr, offset),
						mapping_size, PROT_READ | PROT_WRITE,
						MAP_SHARED, fd, 0);
				close(fd); /* close file both on success and on failure */
				if (addr == MAP_FAILED ||
						addr != RTE_PTR_ADD(base_addr, offset)) {
					RTE_LOG(ERR, EAL, "Could not mmap %s\n",
						hp[i].filepath);
					goto error;
				}
				offset += mapping_size;
			}
		}
		RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
				(unsigned long long)mcfg->memseg[s].len);
		s++;
	}

	/* unmap the hugepage config file, since we are done using it */
	munmap(hp, size);
	close(fd_zero);
	close(fd_hugepage);
	return 0;

error:
	for (i = 0; i < max_seg && mcfg->memseg[i].len > 0; i++)
		munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
	if (hp != NULL && hp != MAP_FAILED)
		munmap(hp, size);
	if (fd_zero >= 0)
		close(fd_zero);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
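/*
 * Pairing sketch (illustrative): the primary process reaches
 * rte_eal_hugepage_init() through rte_eal_init(), while a process started
 * with --proc-type=secondary takes this attach path instead and is subject
 * to the ASLR/--base-virtaddr constraints logged above.
 */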
int
rte_eal_using_phys_addrs(void)
{
	return phys_addrs_available;
}