/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2013 6WIND S.A.
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <unistd.h>
#include <limits.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_memalloc.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"

#define PFN_MASK_SIZE 8
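/* each entry in /proc/self/pagemap is a 64-bit (8-byte) record, hence 8 */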

/**
 * @file
 * Huge page mapping under linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */
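
/*
 * Illustrative setup only (not part of EAL): a typical way to mount
 * hugetlbfs and reserve 2 MB pages before running a DPDK application:
 *
 *   mount -t hugetlbfs nodev /mnt/huge
 *   echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */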

static bool phys_addrs_available = true;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"

static void
test_phys_addrs_available(void)
{
	uint64_t tmp = 0;
	phys_addr_t physaddr;

	if (!rte_eal_has_hugepages()) {
		RTE_LOG(ERR, EAL,
			"Started without hugepages support, physical addresses not available\n");
		phys_addrs_available = false;
		return;
	}

	physaddr = rte_mem_virt2phy(&tmp);
	if (physaddr == RTE_BAD_PHYS_ADDR) {
		if (rte_eal_iova_mode() == RTE_IOVA_PA)
			RTE_LOG(ERR, EAL,
				"Cannot obtain physical addresses: %s. "
				"Only vfio will function.\n",
				strerror(errno));
		phys_addrs_available = false;
	}
}

/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd, retval;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
	if (!phys_addrs_available)
		return RTE_BAD_IOVA;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		close(fd);
		return RTE_BAD_IOVA;
	}

	retval = read(fd, &page, PFN_MASK_SIZE);
	close(fd);
	if (retval < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	} else if (retval != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
			"but expected %d\n",
			__func__, retval, PFN_MASK_SIZE);
		return RTE_BAD_IOVA;
	}

	/*
	 * the pfn (page frame number) is in bits 0-54 (see
	 * pagemap.txt in linux Documentation)
	 */
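	/*
	 * Note: a zero PFN here usually means the page is not present.
	 * Kernels since 4.0 also hide PFNs from readers lacking
	 * CAP_SYS_ADMIN, in which case we end up returning RTE_BAD_IOVA
	 * as well.
	 */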
	if ((page & 0x7fffffffffffffULL) == 0)
		return RTE_BAD_IOVA;

	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}

rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (uintptr_t)virtaddr;
	return rte_mem_virt2phy(virtaddr);
}

/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}

/*
 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
 */
static int
set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	static phys_addr_t addr;
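	/*
	 * 'addr' is static on purpose: the synthetic addresses keep
	 * increasing across calls for different page sizes. They are only
	 * used when real physical addresses are unavailable or not needed
	 * (e.g. in IOVA as VA mode).
	 */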

	for (i = 0; i < hpi->num_pages[0]; i++) {
		hugepg_tbl[i].physaddr = addr;
		addr += hugepg_tbl[i].size;
	}
	return 0;
}

/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes from mapping data to the same virtual address.
 * Returns:
 *	0 - address space randomization disabled
 *	1/2 - address space randomization enabled (2 additionally
 *	      randomizes the data segment)
 *	negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	if (retval == 0)
		return -EIO;
	switch (c) {
	case '0': return 0;
	case '1': return 1;
	case '2': return 2;
	default: return -EINVAL;
	}
}
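
/*
 * Illustrative only: ASLR can be disabled system-wide (e.g. for
 * multi-process DPDK testing) with:
 *   echo 0 > /proc/sys/kernel/randomize_va_space
 */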

static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put sigsetjmp into a wrapper function to avoid compile errors. Any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
static int huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* Callback for numa library. */
void numa_error(char *where)
{
	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
}
#endif

/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap()s hugepage_sz bytes of it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
		uint64_t *essential_memory __rte_unused)
{
	int fd;
	unsigned i;
	void *virtaddr;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int node_id = -1;
	int essential_prev = 0;
	int oldpolicy;
	struct bitmask *oldmask = numa_allocate_nodemask();
	bool have_numa = true;
	unsigned long maxnode = 0;

	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		have_numa = false;
	}

	if (have_numa) {
		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
		if (get_mempolicy(&oldpolicy, oldmask->maskp,
				  oldmask->size + 1, 0, 0) < 0) {
			RTE_LOG(ERR, EAL,
				"Failed to get current mempolicy: %s. "
				"Assuming MPOL_DEFAULT.\n", strerror(errno));
			oldpolicy = MPOL_DEFAULT;
		}
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
			if (internal_config.socket_mem[i])
				maxnode = i + 1;
	}
#endif

	for (i = 0; i < hpi->num_pages[0]; i++) {
		struct hugepage_file *hf = &hugepg_tbl[i];
		uint64_t hugepage_sz = hpi->hugepage_sz;

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (maxnode) {
			unsigned int j;

			for (j = 0; j < maxnode; j++)
				if (essential_memory[j])
					break;

			if (j == maxnode) {
				node_id = (node_id + 1) % maxnode;
				while (!internal_config.socket_mem[node_id]) {
					node_id++;
					node_id %= maxnode;
				}
				essential_prev = 0;
			} else {
				node_id = j;
				essential_prev = essential_memory[j];

				if (essential_memory[j] < hugepage_sz)
					essential_memory[j] = 0;
				else
					essential_memory[j] -= hugepage_sz;
			}

			RTE_LOG(DEBUG, EAL,
				"Setting policy MPOL_PREFERRED for socket %d\n",
				node_id);
			numa_set_preferred(node_id);
		}
#endif

		hf->file_id = i;
		hf->size = hugepage_sz;
		eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
				hpi->hugedir, hf->file_id);
		hf->filepath[sizeof(hf->filepath) - 1] = '\0';

		/* try to create hugepage file */
		fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			goto out;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros. we don't care where
		 * this gets mapped - we already have contiguous memory areas
		 * ready for us to map into.
		 */
		virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			goto out;
		}

		hf->orig_va = virtaddr;

		/* On Linux, hugetlb limits (e.g. those set via cgroups) are
		 * enforced at fault time rather than at mmap() time, even
		 * with MAP_POPULATE; the kernel then sends a SIGBUS signal.
		 * To avoid being killed, save the stack environment here:
		 * if SIGBUS happens, we can jump back to this point.
		 */
		if (huge_wrap_sigsetjmp()) {
			RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
				"hugepages of size %u MB\n",
				(unsigned int)(hugepage_sz / 0x100000));
			munmap(virtaddr, hugepage_sz);
			close(fd);
			unlink(hugepg_tbl[i].filepath);
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
			if (maxnode)
				essential_memory[node_id] =
					essential_prev;
#endif
			goto out;
		}
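		/* touch the page to force allocation now; if the hugetlb
		 * quota is exhausted, this faults and the SIGBUS handler
		 * registered by the caller jumps back to the sigsetjmp above
		 */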
		*(int *)virtaddr = 0;

		/* set shared lock on the file. */
		if (flock(fd, LOCK_SH) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			goto out;
		}

		close(fd);
	}

out:
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (maxnode) {
		RTE_LOG(DEBUG, EAL,
			"Restoring previous memory policy: %d\n", oldpolicy);
		if (oldpolicy == MPOL_DEFAULT) {
			numa_set_localalloc();
		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
					 oldmask->size + 1) < 0) {
			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
				strerror(errno));
			numa_set_localalloc();
		}
	}
	numa_free_cpumask(oldmask);
#endif
	return i;
}

/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(NOTICE, EAL, "NUMA support not available,"
			" consider that all memory is in socket_id 0\n");
		return 0;
	}

	snprintf(hugedir_str, sizeof(hugedir_str),
		"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);

	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non-hugepage entries */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
				RTE_LOG(DEBUG, EAL,
					"Hugepage %s is on socket %d\n",
					hugepg_tbl[i].filepath, socket_id);
#endif
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;
#else
	/* PowerPC needs memory sorted in reverse order from x86 */
	const struct hugepage_file *p1 = b;
	const struct hugepage_file *p2 = a;
#endif
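	/* compare explicitly instead of subtracting: the 64-bit difference
	 * could overflow the comparator's int return value
	 */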
	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	else
		return 0;
}

/*
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd;

	/* if no shared files mode is used, create anonymous memory instead */
	if (internal_config.no_shconf) {
		retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (retval == MAP_FAILED)
			return NULL;
		return retval;
	}

	fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (retval == MAP_FAILED)
		return NULL;
	return retval;
}

/*
 * this copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
		const struct hugepage_file *src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].orig_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}

static int
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages +=
			internal_config.hugepage_info[size].num_pages[socket];

	for (page = 0; page < nrpages; page++) {
		struct hugepage_file *hp = &hugepg_tbl[page];

		if (hp->orig_va != NULL && unlink(hp->filepath)) {
			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
				__func__, hp->filepath, strerror(errno));
		}
	}
	return 0;
}

/*
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

						unmap_len = hp->size;

						/* get start addr and len of the remaining segment */
						munmap(hp->orig_va,
							(size_t)unmap_len);

						hp->orig_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
								__func__, hp->filepath, strerror(errno));
							return -1;
						}
					} else {
						/* lock the page and skip */
						pages_found++;
					}

				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}

static int
remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int cur_page, seg_len;
	unsigned int msl_idx;
	int ms_idx;
	uint64_t page_sz;
	size_t memseg_len;
	int socket_id;

	page_sz = hugepages[seg_start].size;
	socket_id = hugepages[seg_start].socket_id;
	seg_len = seg_end - seg_start;

	RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
			(seg_len * page_sz) >> 20ULL, socket_id);

	/* find free space in memseg lists */
	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		bool empty;
		msl = &mcfg->memsegs[msl_idx];
		arr = &msl->memseg_arr;

		if (msl->page_sz != page_sz)
			continue;
		if (msl->socket_id != socket_id)
			continue;

		/* leave space for a hole if array is not empty */
		empty = arr->count == 0;
		ms_idx = rte_fbarray_find_next_n_free(arr, 0,
				seg_len + (empty ? 0 : 1));

		/* memseg list is full? */
		if (ms_idx < 0)
			continue;

		/* leave some space between memsegs, they are not IOVA
		 * contiguous, so they shouldn't be VA contiguous either.
		 */
		if (!empty)
			ms_idx++;
		break;
	}
	if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
				RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
				RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
		return -1;
	}

#ifdef RTE_ARCH_PPC_64
	/* for PPC64 we go through the list backwards */
	for (cur_page = seg_end - 1; cur_page >= seg_start;
			cur_page--, ms_idx++) {
#else
	for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
#endif
		struct hugepage_file *hfile = &hugepages[cur_page];
		struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
		void *addr;
		int fd;

		fd = open(hfile->filepath, O_RDWR);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
					hfile->filepath, strerror(errno));
			return -1;
		}
		/* set shared lock on the file. */
		if (flock(fd, LOCK_SH) < 0) {
			RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
					hfile->filepath, strerror(errno));
			close(fd);
			return -1;
		}
		memseg_len = (size_t)page_sz;
		addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);

		/* we know this address is already mmapped by memseg list, so
		 * using MAP_FIXED here is safe
		 */
		addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
					hfile->filepath, strerror(errno));
			close(fd);
			return -1;
		}

		/* we have a new address, so unmap previous one */
#ifndef RTE_ARCH_64
		/* in 32-bit legacy mode, we have already unmapped the page */
		if (!internal_config.legacy_mem)
			munmap(hfile->orig_va, page_sz);
#else
		munmap(hfile->orig_va, page_sz);
#endif

		hfile->orig_va = NULL;
		hfile->final_va = addr;

		/* rewrite physical addresses in IOVA as VA mode */
		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			hfile->physaddr = (uintptr_t)addr;

		/* set up memseg data */
		ms->addr = addr;
		ms->hugepage_sz = page_sz;
		ms->len = memseg_len;
		ms->iova = hfile->physaddr;
		ms->socket_id = hfile->socket_id;
		ms->nchannel = rte_memory_get_nchannel();
		ms->nrank = rte_memory_get_nrank();

		rte_fbarray_set_used(arr, ms_idx);

		close(fd);
	}
	RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
			(seg_len * page_sz) >> 20, socket_id);
	return 0;
}

static uint64_t
get_mem_amount(uint64_t page_sz, uint64_t max_mem)
{
	uint64_t area_sz, max_pages;

	/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
	max_pages = RTE_MAX_MEMSEG_PER_LIST;
	max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);

	area_sz = RTE_MIN(page_sz * max_pages, max_mem);

	/* make sure the list isn't smaller than the page size */
	area_sz = RTE_MAX(area_sz, page_sz);

	return RTE_ALIGN(area_sz, page_sz);
}
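
/*
 * Worked example (assuming the default configuration values of 8192 for
 * RTE_MAX_MEMSEG_PER_LIST and 32768 for RTE_MAX_MEM_MB_PER_LIST): with
 * 2 MB pages and a large max_mem, area_sz = min(2 MB * 8192, 32 GB) = 16 GB.
 */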

static int
free_memseg_list(struct rte_memseg_list *msl)
{
	if (rte_fbarray_destroy(&msl->memseg_arr)) {
		RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
		return -1;
	}
	memset(msl, 0, sizeof(*msl));
	return 0;
}

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
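/* e.g. "memseg-2048k-0-0" for the first list of 2 MB pages on socket 0 */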
static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
		int n_segs, int socket_id, int type_msl_idx)
{
	char name[RTE_FBARRAY_NAME_LEN];

	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
		 type_msl_idx);
	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
			sizeof(struct rte_memseg))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
			rte_strerror(rte_errno));
		return -1;
	}

	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->base_va = NULL;

	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
			(size_t)page_sz >> 10, socket_id);

	return 0;
}

static int
alloc_va_space(struct rte_memseg_list *msl)
{
	uint64_t page_sz;
	size_t mem_sz;
	void *addr;
	int flags = 0;

#ifdef RTE_ARCH_PPC_64
	flags |= MAP_HUGETLB;
#endif

	page_sz = msl->page_sz;
	mem_sz = page_sz * msl->memseg_arr.len;

	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
	if (addr == NULL) {
		if (rte_errno == EADDRNOTAVAIL)
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
				(unsigned long long)mem_sz, msl->base_va);
		else
			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
		return -1;
	}
	msl->base_va = addr;

	return 0;
}

/*
 * Our VA space is not preallocated yet, so preallocate it here. We need to know
 * how many segments there are in order to map all pages into one address space,
 * and leave appropriate holes between segments so that rte_malloc does not
 * concatenate them into one big segment.
 *
 * we also need to unmap original pages to free up address space.
 */
static int __rte_unused
prealloc_segments(struct hugepage_file *hugepages, int n_pages)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int cur_page, seg_start_page, end_seg, new_memseg;
	unsigned int hpi_idx, socket, i;
	int n_contig_segs, n_segs;
	int msl_idx;

	/* before we preallocate segments, we need to free up our VA space.
	 * we're not removing files, and we already have information about
	 * PA-contiguousness, so it is safe to unmap everything.
	 */
	for (cur_page = 0; cur_page < n_pages; cur_page++) {
		struct hugepage_file *hpi = &hugepages[cur_page];
		munmap(hpi->orig_va, hpi->size);
		hpi->orig_va = NULL;
	}

	/* we cannot know in advance how many page sizes and sockets we have
	 * discovered, so loop over all of them
	 */
	for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
			hpi_idx++) {
		uint64_t page_sz =
			internal_config.hugepage_info[hpi_idx].hugepage_sz;

		for (i = 0; i < rte_socket_count(); i++) {
			struct rte_memseg_list *msl;

			socket = rte_socket_id_by_idx(i);
			n_contig_segs = 0;
			n_segs = 0;
			seg_start_page = -1;

			for (cur_page = 0; cur_page < n_pages; cur_page++) {
				struct hugepage_file *prev, *cur;
				int prev_seg_start_page = -1;

				cur = &hugepages[cur_page];
				prev = cur_page == 0 ? NULL :
						&hugepages[cur_page - 1];

				new_memseg = 0;
				end_seg = 0;

				if (cur->size == 0)
					end_seg = 1;
				else if (cur->socket_id != (int) socket)
					end_seg = 1;
				else if (cur->size != page_sz)
					end_seg = 1;
				else if (cur_page == 0)
					new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
				/* On PPC64, mmap always maps from higher to
				 * lower addresses, so physical addresses are
				 * in descending order here.
				 */
				else if ((prev->physaddr - cur->physaddr) !=
						cur->size)
					new_memseg = 1;
#else
				else if ((cur->physaddr - prev->physaddr) !=
						cur->size)
					new_memseg = 1;
#endif
				if (new_memseg) {
					/* if we're already inside a segment,
					 * new segment means end of current one
					 */
					if (seg_start_page != -1) {
						end_seg = 1;
						prev_seg_start_page =
								seg_start_page;
					}
					seg_start_page = cur_page;
				}

				if (end_seg) {
					if (prev_seg_start_page != -1) {
						/* we've found a new segment */
						n_contig_segs++;
						n_segs += cur_page -
								prev_seg_start_page;
					} else if (seg_start_page != -1) {
						/* we didn't find new segment,
						 * but did end current one
						 */
						n_contig_segs++;
						n_segs += cur_page -
								seg_start_page;
						seg_start_page = -1;
						continue;
					} else {
						/* we're skipping this page */
						continue;
					}
				}
				/* segment continues */
			}
			/* check if we missed last segment */
			if (seg_start_page != -1) {
				n_contig_segs++;
				n_segs += cur_page - seg_start_page;
			}

			/* if no segments were found, do not preallocate */
			if (n_segs == 0)
				continue;

			/* we now have total number of pages that we will
			 * allocate for this segment list. add separator pages
			 * to the total count, and preallocate VA space.
			 */
			n_segs += n_contig_segs - 1;

			/* now, preallocate VA space for these segments */

			/* first, find suitable memseg list for this */
			for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
					msl_idx++) {
				msl = &mcfg->memsegs[msl_idx];

				if (msl->base_va != NULL)
					continue;
				break;
			}
			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
				RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
					RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
				return -1;
			}

			/* now, allocate fbarray itself */
			if (alloc_memseg_list(msl, page_sz, n_segs, socket,
						msl_idx) < 0)
				return -1;

			/* finally, allocate VA space */
			if (alloc_va_space(msl) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
 * backwards, therefore we have to process the entire memseg first before
 * remapping it into memseg list VA space.
 */
static int
remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
{
	int cur_page, seg_start_page, new_memseg, ret;

	seg_start_page = 0;
	for (cur_page = 0; cur_page < n_pages; cur_page++) {
		struct hugepage_file *prev, *cur;

		new_memseg = 0;

		cur = &hugepages[cur_page];
		prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];

		/* if size is zero, no more pages left */
		if (cur->size == 0)
			break;

		if (cur_page == 0)
			new_memseg = 1;
		else if (cur->socket_id != prev->socket_id)
			new_memseg = 1;
		else if (cur->size != prev->size)
			new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
		/* On PPC64, mmap always maps from higher to lower addresses,
		 * so physical addresses are in descending order here.
		 */
		else if ((prev->physaddr - cur->physaddr) != cur->size)
			new_memseg = 1;
#else
		else if ((cur->physaddr - prev->physaddr) != cur->size)
			new_memseg = 1;
#endif

		if (new_memseg) {
			/* if this isn't the first time, remap segment */
			if (cur_page != 0) {
				ret = remap_segment(hugepages, seg_start_page,
						cur_page);
				if (ret != 0)
					return -1;
			}
			/* remember where we started */
			seg_start_page = cur_page;
		}
		/* continuation of previous memseg */
	}
	/* we were stopped, but we didn't remap the last segment, do it now */
	if (cur_page != 0) {
		ret = remap_segment(hugepages, seg_start_page,
				cur_page);
		if (ret != 0)
			return -1;
	}
	return 0;
}

static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}

/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> ram
 */
static int
calc_num_pages_per_socket(uint64_t *memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		size_t total_size;
#ifdef RTE_ARCH_64
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected
		 * sockets according to the number of cores from the CPU mask
		 * present on each socket
		 */
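		/*
		 * For example: with -m 4096 and 6 lcores on socket 0 plus
		 * 2 lcores on socket 1, socket 0 initially gets
		 * 4096 * 6 / 8 = 3072 MB and socket 1 gets 1024 MB, each
		 * clamped to what is actually available on that socket.
		 */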
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting all
		 * available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					       total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
#else
		/* in 32-bit mode, allocate all of the memory only on master
		 * lcore socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
				socket++) {
			struct rte_config *cfg = rte_eal_get_configuration();
			unsigned int master_lcore_socket;

			master_lcore_socket =
				rte_lcore_to_socket_id(cfg->master_lcore);

			if (master_lcore_socket != socket)
				continue;

			/* Update sizes */
			memory[socket] = total_size;
			break;
		}
#endif
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
			strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
				sizeof(hp_used[i].hugedir));
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size,
			 * if so, move on to next size
			 */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages
			 * available that are bigger than the memory we want,
			 * so let's see if we can get enough from other page
			 * sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
				hp_info[j].num_pages[socket];

			/* if there is not enough other memory, allocate another page and quit */
			if (remaining_mem < memory[socket]) {
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket */
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0 &&
				internal_config.socket_mem[socket] != 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}

static inline size_t
eal_get_hugepage_mem_size(void)
{
	uint64_t size = 0;
	unsigned i, j;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
				size += hpi->hugepage_sz * hpi->num_pages[j];
			}
		}
	}

	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}

static struct sigaction huge_action_old;
static int huge_need_recover;

static void
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

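	/* sigaction() returns 0 on success, so remember whether we actually
	 * replaced the old handler and must restore it later
	 */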
	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}

/*
 * Prepare physical memory mapping: fill configuration structure with
 * this information, return 0 on success.
 * 1. map N huge pages in separate files in hugetlbfs
 * 2. find associated physical addr
 * 3. find associated NUMA socket ID
 * 4. sort all huge pages by physical address
 * 5. remap these N huge pages in the correct order
 * 6. unmap the first mapping
 * 7. fill memsegs in configuration with contiguous zones
 */
static int
eal_legacy_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
	struct rte_fbarray *arr;
	struct rte_memseg *ms;

	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;

	test_phys_addrs_available();

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		struct rte_memseg_list *msl;
		uint64_t page_sz;
		int n_segs, cur_seg;

		/* nohuge mode is legacy mode */
		internal_config.legacy_mem = 1;

		/* create a memseg list */
		msl = &mcfg->memsegs[0];

		page_sz = RTE_PGSIZE_4K;
		n_segs = internal_config.memory / page_sz;

		if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
					sizeof(struct rte_memseg))) {
			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
			return -1;
		}

		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		msl->base_va = addr;
		msl->page_sz = page_sz;
		msl->socket_id = 0;

		/* populate memsegs. each memseg is one page long */
		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
			arr = &msl->memseg_arr;

			ms = rte_fbarray_get(arr, cur_seg);
			if (rte_eal_iova_mode() == RTE_IOVA_VA)
				ms->iova = (uintptr_t)addr;
			else
				ms->iova = RTE_BAD_IOVA;
			ms->addr = addr;
			ms->hugepage_sz = page_sz;
			ms->socket_id = 0;
			ms->len = page_sz;

			rte_fbarray_set_used(arr, cur_seg);

			addr = RTE_PTR_ADD(addr, (size_t)page_sz);
		}
		return 0;
	}

	/* allocate a single hugetlbfs file on the master numa node */
	if (internal_config.single_file_segments) {
		struct hugepage_info *hpi = NULL;
		struct rte_memseg_list *msl;
		size_t vma_len;
		int n_segs, cur_seg;
		char filepath[PATH_MAX];
		unsigned node_id = 0;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
		int oldpolicy;
		struct bitmask *oldmask = numa_allocate_nodemask();
		bool have_numa = true;

		node_id = rte_lcore_to_socket_id(rte_get_master_lcore());
		if (numa_available() != 0) {
			RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
			have_numa = false;
		} else {
			RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
			if (get_mempolicy(&oldpolicy, oldmask->maskp,
					oldmask->size + 1, 0, 0) < 0) {
				RTE_LOG(ERR, EAL,
					"Failed to get current mempolicy: %s. "
					"Assuming MPOL_DEFAULT.\n", strerror(errno));
				oldpolicy = MPOL_DEFAULT;
			}

			RTE_LOG(DEBUG, EAL,
				"Setting policy MPOL_PREFERRED for socket %d\n",
				node_id);
			numa_set_preferred(node_id);
		}
#endif

		if (internal_config.memory == 0 && internal_config.force_sockets == 0)
			internal_config.memory = eal_get_hugepage_mem_size();

		/* choose optimal hugetlbfs for the mapping */
		for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
			hpi = &internal_config.hugepage_info[i];
			if (hpi->hugepage_sz > internal_config.memory ||
					hpi->num_pages[0] * hpi->hugepage_sz <
					internal_config.memory)
				hpi = NULL;
		}

		if (hpi == NULL) {
			RTE_LOG(ERR, EAL,
				"Cannot find a single hugetlbfs with %"PRIu64" MB free mem.\n",
				internal_config.memory >> 20);
			return -1;
		}

		eal_get_hugefile_path(filepath, sizeof(filepath), hpi->hugedir, 0);
		filepath[sizeof(filepath) - 1] = '\0';

		/* try to create hugepage file */
		int fd = open(filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}

		/* length needs to be manually aligned for future munmap */
		vma_len = RTE_ALIGN_CEIL(internal_config.memory, hpi->hugepage_sz);
		addr = eal_get_virtual_area(NULL, &vma_len, hpi->hugepage_sz, 0, 0);
		if (addr == NULL) {
			RTE_LOG(ERR, EAL,
				"Cannot reserve virtually-contiguous %"PRIu64" MB.\n",
				internal_config.memory >> 20);
			return -1;
		}

		addr = mmap(addr, vma_len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (have_numa) {
			RTE_LOG(DEBUG, EAL,
				"Restoring previous memory policy: %d\n", oldpolicy);
			if (oldpolicy == MPOL_DEFAULT) {
				numa_set_localalloc();
			} else if (set_mempolicy(oldpolicy, oldmask->maskp,
						 oldmask->size + 1) < 0) {
				RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
					strerror(errno));
				numa_set_localalloc();
			}
		}
		numa_free_cpumask(oldmask);
#endif
		/* create a memseg list */
		msl = &mcfg->memsegs[0];

		n_segs = vma_len / hpi->hugepage_sz;

		if (rte_fbarray_init(&msl->memseg_arr, "singlefileseg", n_segs,
					sizeof(struct rte_memseg))) {
			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
			return -1;
		}

		msl->base_va = addr;
		msl->page_sz = hpi->hugepage_sz;
		msl->socket_id = node_id;

		/* populate memsegs. each memseg is one page long */
		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
			arr = &msl->memseg_arr;

			ms = rte_fbarray_get(arr, cur_seg);
			if (rte_eal_iova_mode() == RTE_IOVA_VA)
				ms->iova = (uintptr_t)addr;
			else
				ms->iova = RTE_BAD_IOVA;
			ms->addr = addr;
			ms->hugepage_sz = hpi->hugepage_sz;
			ms->socket_id = node_id;
			ms->len = hpi->hugepage_sz;

			rte_fbarray_set_used(arr, cur_seg);

			addr = RTE_PTR_ADD(addr, (size_t)hpi->hugepage_sz);
		}

		return 0;
	}
1523 | ||
1524 | /* calculate total number of hugepages available. at this point we haven't | |
1525 | * yet started sorting them so they all are on socket 0 */ | |
1526 | for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) { | |
1527 | /* meanwhile, also initialize used_hp hugepage sizes in used_hp */ | |
1528 | used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz; | |
1529 | ||
1530 | nr_hugepages += internal_config.hugepage_info[i].num_pages[0]; | |
1531 | } | |
1532 | ||
1533 | /* | |
1534 | * allocate a memory area for hugepage table. | |
1535 | * this isn't shared memory yet. due to the fact that we need some | |
1536 | * processing done on these pages, shared memory will be created | |
1537 | * at a later stage. | |
1538 | */ | |
1539 | tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file)); | |
1540 | if (tmp_hp == NULL) | |
1541 | goto fail; | |
1542 | ||
1543 | memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file)); | |
1544 | ||
1545 | hp_offset = 0; /* where we start the current page size entries */ | |
1546 | ||
1547 | huge_register_sigbus(); | |
1548 | ||
1549 | /* make a copy of socket_mem, needed for balanced allocation. */ | |
1550 | for (i = 0; i < RTE_MAX_NUMA_NODES; i++) | |
1551 | memory[i] = internal_config.socket_mem[i]; | |
1552 | ||
1553 | /* map all hugepages and sort them */ | |
1554 | for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){ | |
1555 | unsigned pages_old, pages_new; | |
1556 | struct hugepage_info *hpi; | |
1557 | ||
1558 | /* | |
1559 | * we don't yet mark hugepages as used at this stage, so | |
1560 | * we just map all hugepages available to the system | |
1561 | * all hugepages are still located on socket 0 | |
1562 | */ | |
1563 | hpi = &internal_config.hugepage_info[i]; | |
1564 | ||
1565 | if (hpi->num_pages[0] == 0) | |
1566 | continue; | |
1567 | ||
1568 | /* map all hugepages available */ | |
1569 | pages_old = hpi->num_pages[0]; | |
1570 | pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory); | |
1571 | if (pages_new < pages_old) { | |
1572 | RTE_LOG(DEBUG, EAL, | |
1573 | "%d not %d hugepages of size %u MB allocated\n", | |
1574 | pages_new, pages_old, | |
1575 | (unsigned)(hpi->hugepage_sz / 0x100000)); | |
1576 | ||
1577 | int pages = pages_old - pages_new; | |
1578 | ||
1579 | nr_hugepages -= pages; | |
1580 | hpi->num_pages[0] = pages_new; | |
1581 | if (pages_new == 0) | |
1582 | continue; | |
1583 | } | |
1584 | ||
1585 | if (phys_addrs_available && | |
1586 | rte_eal_iova_mode() != RTE_IOVA_VA) { | |
1587 | /* find physical addresses for each hugepage */ | |
1588 | if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) { | |
1589 | RTE_LOG(DEBUG, EAL, "Failed to find phys addr " | |
1590 | "for %u MB pages\n", | |
1591 | (unsigned int)(hpi->hugepage_sz / 0x100000)); | |
1592 | goto fail; | |
1593 | } | |
1594 | } else { | |
1595 | /* set physical addresses for each hugepage */ | |
1596 | if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) { | |
1597 | RTE_LOG(DEBUG, EAL, "Failed to set phys addr " | |
1598 | "for %u MB pages\n", | |
1599 | (unsigned int)(hpi->hugepage_sz / 0x100000)); | |
1600 | goto fail; | |
1601 | } | |
1602 | } | |
1603 | ||
1604 | if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){ | |
1605 | RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n", | |
1606 | (unsigned)(hpi->hugepage_sz / 0x100000)); | |
1607 | goto fail; | |
1608 | } | |
1609 | ||
1610 | qsort(&tmp_hp[hp_offset], hpi->num_pages[0], | |
1611 | sizeof(struct hugepage_file), cmp_physaddr); | |
1612 | ||
1613 | /* we have processed a num of hugepages of this size, so inc offset */ | |
1614 | hp_offset += hpi->num_pages[0]; | |
1615 | } | |
1616 | ||
1617 | huge_recover_sigbus(); | |
1618 | ||
1619 | if (internal_config.memory == 0 && internal_config.force_sockets == 0) | |
1620 | internal_config.memory = eal_get_hugepage_mem_size(); | |
1621 | ||
1622 | nr_hugefiles = nr_hugepages; | |
1623 | ||
1624 | ||
1625 | /* clean out the numbers of pages */ | |
1626 | for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) | |
1627 | for (j = 0; j < RTE_MAX_NUMA_NODES; j++) | |
1628 | internal_config.hugepage_info[i].num_pages[j] = 0; | |
1629 | ||
1630 | /* get hugepages for each socket */ | |
1631 | for (i = 0; i < nr_hugefiles; i++) { | |
1632 | int socket = tmp_hp[i].socket_id; | |
1633 | ||
1634 | /* find a hugepage info with right size and increment num_pages */ | |
1635 | const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES, | |
1636 | (int)internal_config.num_hugepage_sizes); | |
1637 | for (j = 0; j < nb_hpsizes; j++) { | |
1638 | if (tmp_hp[i].size == | |
1639 | internal_config.hugepage_info[j].hugepage_sz) { | |
1640 | internal_config.hugepage_info[j].num_pages[socket]++; | |
1641 | } | |
1642 | } | |
1643 | } | |
1644 | ||
1645 | /* make a copy of socket_mem, needed for number of pages calculation */ | |
1646 | for (i = 0; i < RTE_MAX_NUMA_NODES; i++) | |
1647 | memory[i] = internal_config.socket_mem[i]; | |
1648 | ||
1649 | /* calculate final number of pages */ | |
1650 | nr_hugepages = calc_num_pages_per_socket(memory, | |
1651 | internal_config.hugepage_info, used_hp, | |
1652 | internal_config.num_hugepage_sizes); | |
1653 | ||
1654 | /* error if not enough memory available */ | |
1655 | if (nr_hugepages < 0) | |
1656 | goto fail; | |
1657 | ||
1658 | /* report the page counts we will request from each socket */ | |
1659 | for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) { | |
1660 | for (j = 0; j < RTE_MAX_NUMA_NODES; j++) { | |
1661 | if (used_hp[i].num_pages[j] > 0) { | |
1662 | RTE_LOG(DEBUG, EAL, | |
1663 | "Requesting %u pages of size %uMB" | |
1664 | " from socket %i\n", | |
1665 | used_hp[i].num_pages[j], | |
1666 | (unsigned) | |
1667 | (used_hp[i].hugepage_sz / 0x100000), | |
1668 | j); | |
1669 | } | |
1670 | } | |
1671 | } | |
1672 | ||
1673 | /* create shared memory */ | |
1674 | hugepage = create_shared_memory(eal_hugepage_data_path(), | |
1675 | nr_hugefiles * sizeof(struct hugepage_file)); | |
1676 | ||
1677 | if (hugepage == NULL) { | |
1678 | RTE_LOG(ERR, EAL, "Failed to create shared memory!\n"); | |
1679 | goto fail; | |
1680 | } | |
1681 | memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file)); | |
1682 | ||
1683 | /* | |
1684 | * unmap pages that we won't need (looks at used_hp). | |
1685 | * also, sets final_va to NULL on pages that were unmapped. | |
1686 | */ | |
1687 | if (unmap_unneeded_hugepages(tmp_hp, used_hp, | |
1688 | internal_config.num_hugepage_sizes) < 0) { | |
1689 | RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n"); | |
1690 | goto fail; | |
1691 | } | |
1692 | ||
1693 | /* | |
1694 | * copy entries from the malloc'd temporary table into the actual | |
1695 | * shared memory. only hugepages with a non-NULL orig_va are | |
1696 | * copied, and the copy is bounds-checked. | |
1697 | */ | |
1698 | if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles, | |
1699 | tmp_hp, nr_hugefiles) < 0) { | |
1700 | RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n"); | |
1701 | goto fail; | |
1702 | } | |
1703 | ||
1704 | #ifndef RTE_ARCH_64 | |
1705 | /* for legacy 32-bit mode, we did not preallocate VA space, so do it */ | |
1706 | if (internal_config.legacy_mem && | |
1707 | prealloc_segments(hugepage, nr_hugefiles)) { | |
1708 | RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n"); | |
1709 | goto fail; | |
1710 | } | |
1711 | #endif | |
1712 | ||
1713 | /* remap all pages we do need into memseg list VA space, so that those | |
1714 | * pages become first-class citizens in DPDK memory subsystem | |
1715 | */ | |
1716 | if (remap_needed_hugepages(hugepage, nr_hugefiles)) { | |
1717 | RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n"); | |
1718 | goto fail; | |
1719 | } | |
1720 | ||
1721 | /* free the hugepage backing files */ | |
1722 | if (internal_config.hugepage_unlink && | |
1723 | unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) { | |
1724 | RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n"); | |
1725 | goto fail; | |
1726 | } | |
1727 | ||
1728 | /* free the temporary hugepage table */ | |
1729 | free(tmp_hp); | |
1730 | tmp_hp = NULL; | |
1731 | ||
1732 | munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file)); | |
1733 | ||
1734 | /* we're not going to allocate more pages, so release VA space for | |
1735 | * unused memseg lists | |
1736 | */ | |
1737 | for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) { | |
1738 | struct rte_memseg_list *msl = &mcfg->memsegs[i]; | |
1739 | size_t mem_sz; | |
1740 | ||
1741 | /* skip inactive lists */ | |
1742 | if (msl->base_va == NULL) | |
1743 | continue; | |
1744 | /* skip lists where there is at least one page allocated */ | |
1745 | if (msl->memseg_arr.count > 0) | |
1746 | continue; | |
1747 | /* this is an unused list, deallocate it */ | |
1748 | mem_sz = (size_t)msl->page_sz * msl->memseg_arr.len; | |
1749 | munmap(msl->base_va, mem_sz); | |
1750 | msl->base_va = NULL; | |
1751 | ||
1752 | /* destroy backing fbarray */ | |
1753 | rte_fbarray_destroy(&msl->memseg_arr); | |
1754 | } | |
1755 | ||
1756 | return 0; | |
1757 | ||
1758 | fail: | |
1759 | huge_recover_sigbus(); | |
1760 | free(tmp_hp); | |
1761 | if (hugepage != NULL) | |
1762 | munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file)); | |
1763 | ||
1764 | return -1; | |
1765 | } | |
1766 | ||
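| /* memseg list walk callback: count, per socket, how many segments of | |
| * the page size given in *arg were preallocated (the 32-bit init path | |
| * uses this to cap page counts at what the memseg lists can hold). | |
| */ | |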
1767 | static int __rte_unused | |
1768 | hugepage_count_walk(const struct rte_memseg_list *msl, void *arg) | |
1769 | { | |
1770 | struct hugepage_info *hpi = arg; | |
1771 | ||
1772 | if (msl->page_sz != hpi->hugepage_sz) | |
1773 | return 0; | |
1774 | ||
1775 | hpi->num_pages[msl->socket_id] += msl->memseg_arr.len; | |
1776 | return 0; | |
1777 | } | |
1778 | ||
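| /* allocation validator for --socket-limit: unconditionally returning | |
| * -1 rejects any allocation that would push a socket past its limit. | |
| */ | |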
1779 | static int | |
1780 | limits_callback(int socket_id, size_t cur_limit, size_t new_len) | |
1781 | { | |
1782 | RTE_SET_USED(socket_id); | |
1783 | RTE_SET_USED(cur_limit); | |
1784 | RTE_SET_USED(new_len); | |
1785 | return -1; | |
1786 | } | |
1787 | ||
1788 | static int | |
1789 | eal_hugepage_init(void) | |
1790 | { | |
1791 | struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES]; | |
1792 | uint64_t memory[RTE_MAX_NUMA_NODES]; | |
1793 | int hp_sz_idx, socket_id; | |
1794 | ||
1795 | test_phys_addrs_available(); | |
1796 | ||
1797 | memset(used_hp, 0, sizeof(used_hp)); | |
1798 | ||
1799 | for (hp_sz_idx = 0; | |
1800 | hp_sz_idx < (int) internal_config.num_hugepage_sizes; | |
1801 | hp_sz_idx++) { | |
1802 | #ifndef RTE_ARCH_64 | |
1803 | struct hugepage_info dummy; | |
1804 | unsigned int i; | |
1805 | #endif | |
1806 | /* also initialize the hugepage size for this entry in used_hp */ | |
1807 | struct hugepage_info *hpi; | |
1808 | hpi = &internal_config.hugepage_info[hp_sz_idx]; | |
1809 | used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz; | |
1810 | ||
1811 | #ifndef RTE_ARCH_64 | |
1812 | /* for 32-bit, limit number of pages on socket to whatever we've | |
1813 | * preallocated, as we cannot allocate more. | |
1814 | */ | |
1815 | memset(&dummy, 0, sizeof(dummy)); | |
1816 | dummy.hugepage_sz = hpi->hugepage_sz; | |
1817 | if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0) | |
1818 | return -1; | |
1819 | ||
1820 | for (i = 0; i < RTE_DIM(dummy.num_pages); i++) { | |
1821 | hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i], | |
1822 | dummy.num_pages[i]); | |
1823 | } | |
1824 | #endif | |
1825 | } | |
1826 | ||
1827 | /* make a copy of socket_mem, needed for balanced allocation. */ | |
1828 | for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++) | |
1829 | memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx]; | |
1830 | ||
1831 | /* calculate final number of pages */ | |
1832 | if (calc_num_pages_per_socket(memory, | |
1833 | internal_config.hugepage_info, used_hp, | |
1834 | internal_config.num_hugepage_sizes) < 0) | |
1835 | return -1; | |
1836 | ||
1837 | for (hp_sz_idx = 0; | |
1838 | hp_sz_idx < (int)internal_config.num_hugepage_sizes; | |
1839 | hp_sz_idx++) { | |
1840 | for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES; | |
1841 | socket_id++) { | |
1842 | struct rte_memseg **pages; | |
1843 | struct hugepage_info *hpi = &used_hp[hp_sz_idx]; | |
1844 | unsigned int num_pages = hpi->num_pages[socket_id]; | |
1845 | int num_pages_alloc, i; | |
1846 | ||
1847 | if (num_pages == 0) | |
1848 | continue; | |
1849 | ||
1850 | pages = malloc(sizeof(*pages) * num_pages); | |
| if (pages == NULL) | |
| return -1; | |
1851 | ||
1852 | RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n", | |
1853 | num_pages, hpi->hugepage_sz >> 20, socket_id); | |
1854 | ||
1855 | num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages, | |
1856 | num_pages, hpi->hugepage_sz, | |
1857 | socket_id, true); | |
1858 | if (num_pages_alloc < 0) { | |
1859 | free(pages); | |
1860 | return -1; | |
1861 | } | |
1862 | ||
1863 | /* mark preallocated pages as unfreeable */ | |
1864 | for (i = 0; i < num_pages_alloc; i++) { | |
1865 | struct rte_memseg *ms = pages[i]; | |
1866 | ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE; | |
1867 | } | |
1868 | free(pages); | |
1869 | } | |
1870 | } | |
1871 | /* if socket limits were specified, set them */ | |
1872 | if (internal_config.force_socket_limits) { | |
1873 | unsigned int i; | |
1874 | for (i = 0; i < RTE_MAX_NUMA_NODES; i++) { | |
1875 | uint64_t limit = internal_config.socket_limit[i]; | |
1876 | if (limit == 0) | |
1877 | continue; | |
1878 | if (rte_mem_alloc_validator_register("socket-limit", | |
1879 | limits_callback, i, limit)) | |
1880 | RTE_LOG(ERR, EAL, "Failed to register socket limits validator callback\n"); | |
1881 | } | |
1882 | } | |
1883 | return 0; | |
1884 | } | |
1885 | ||
1886 | /* | |
1887 | * uses fstat to report the size of a file on disk; returns 0 on failure | |
1888 | */ | |
1889 | static off_t | |
1890 | getFileSize(int fd) | |
1891 | { | |
1892 | struct stat st; | |
1893 | if (fstat(fd, &st) < 0) | |
1894 | return 0; | |
1895 | return st.st_size; | |
1896 | } | |
1897 | ||
1898 | /* | |
1899 | * This creates the memory mappings in the secondary process to match that of | |
1900 | * the server process. It goes through each memory segment in the DPDK runtime | |
1901 | * configuration and finds the hugepages which form that segment, mapping them | |
1902 | * in order to form a contiguous block in the virtual memory space | |
1903 | */ | |
1904 | static int | |
1905 | eal_legacy_hugepage_attach(void) | |
1906 | { | |
1907 | struct hugepage_file *hp = NULL; | |
1908 | unsigned int num_hp = 0; | |
1909 | unsigned int i = 0; | |
1910 | unsigned int cur_seg; | |
1911 | off_t size = 0; | |
1912 | int fd, fd_hugepage = -1; | |
1913 | ||
1914 | if (aslr_enabled() > 0) { | |
1915 | RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization " | |
1916 | "(ASLR) is enabled in the kernel.\n"); | |
1917 | RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory " | |
1918 | "into secondary processes\n"); | |
1919 | } | |
1920 | ||
1921 | test_phys_addrs_available(); | |
1922 | ||
1923 | fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY); | |
1924 | if (fd_hugepage < 0) { | |
1925 | RTE_LOG(ERR, EAL, "Could not open %s\n", | |
1926 | eal_hugepage_data_path()); | |
1927 | goto error; | |
1928 | } | |
1929 | ||
1930 | size = getFileSize(fd_hugepage); | |
1931 | hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0); | |
1932 | if (hp == MAP_FAILED) { | |
1933 | RTE_LOG(ERR, EAL, "Could not mmap %s\n", | |
1934 | eal_hugepage_data_path()); | |
1935 | goto error; | |
1936 | } | |
1937 | ||
1938 | num_hp = size / sizeof(struct hugepage_file); | |
1939 | RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp); | |
1940 | ||
1941 | /* map all segments into memory to make sure we get the addrs. the | |
1942 | * segments themselves are already in memseg list (which is shared and | |
1943 | * has its VA space already preallocated), so we just need to map | |
1944 | * everything into correct addresses. | |
1945 | */ | |
1946 | for (i = 0; i < num_hp; i++) { | |
1947 | struct hugepage_file *hf = &hp[i]; | |
1948 | size_t map_sz = hf->size; | |
1949 | void *map_addr = hf->final_va; | |
1950 | ||
1951 | /* if size is zero, no more pages left */ | |
1952 | if (map_sz == 0) | |
1953 | break; | |
1954 | ||
1955 | fd = open(hf->filepath, O_RDWR); | |
1956 | if (fd < 0) { | |
1957 | RTE_LOG(ERR, EAL, "Could not open %s: %s\n", | |
1958 | hf->filepath, strerror(errno)); | |
1959 | goto error; | |
1960 | } | |
1961 | ||
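| /* MAP_FIXED places the mapping at the exact address the primary | |
| * recorded in final_va; this is safe because the shared memseg | |
| * lists have this VA space preallocated. | |
| */ | |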
1962 | map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE, | |
1963 | MAP_SHARED | MAP_FIXED, fd, 0); | |
1964 | if (map_addr == MAP_FAILED) { | |
1965 | RTE_LOG(ERR, EAL, "Could not map %s: %s\n", | |
1966 | hf->filepath, strerror(errno)); | |
1967 | close(fd); | |
1968 | goto error; | |
1969 | } | |
1970 | ||
1971 | /* take a shared lock on the file to mark the page as in use. */ | |
1972 | if (flock(fd, LOCK_SH) < 0) { | |
1973 | RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n", | |
1974 | __func__, strerror(errno)); | |
1975 | close(fd); | |
1976 | goto error; | |
1977 | } | |
1978 | ||
1979 | close(fd); | |
1980 | } | |
1981 | /* unmap the hugepage config file, since we are done using it */ | |
1982 | munmap(hp, size); | |
1983 | close(fd_hugepage); | |
1984 | return 0; | |
1985 | ||
1986 | error: | |
1987 | /* unwind the mappings made so far */ | |
1989 | for (cur_seg = 0; cur_seg < i; cur_seg++) { | |
1990 | struct hugepage_file *hf = &hp[cur_seg]; | |
1991 | size_t map_sz = hf->size; | |
1992 | void *map_addr = hf->final_va; | |
1993 | ||
1994 | munmap(map_addr, map_sz); | |
1995 | } | |
1996 | if (hp != NULL && hp != MAP_FAILED) | |
1997 | munmap(hp, size); | |
1998 | if (fd_hugepage >= 0) | |
1999 | close(fd_hugepage); | |
2000 | return -1; | |
2001 | } | |
2002 | ||
2003 | static int | |
2004 | eal_hugepage_attach(void) | |
2005 | { | |
2006 | if (eal_memalloc_sync_with_primary()) { | |
2007 | RTE_LOG(ERR, EAL, "Could not map memory from primary process\n"); | |
2008 | if (aslr_enabled() > 0) | |
2009 | RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n"); | |
2010 | return -1; | |
2011 | } | |
2012 | return 0; | |
2013 | } | |
2014 | ||
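| /* entry point: legacy mode maps all requested memory up front, while | |
| * dynamic mode preallocates only --socket-mem and grows on demand. | |
| */ | |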
2015 | int | |
2016 | rte_eal_hugepage_init(void) | |
2017 | { | |
2018 | return internal_config.legacy_mem ? | |
2019 | eal_legacy_hugepage_init() : | |
2020 | eal_hugepage_init(); | |
2021 | } | |
2022 | ||
2023 | int | |
2024 | rte_eal_hugepage_attach(void) | |
2025 | { | |
2026 | return internal_config.legacy_mem ? | |
2027 | eal_legacy_hugepage_attach() : | |
2028 | eal_hugepage_attach(); | |
2029 | } | |
2030 | ||
2031 | int | |
2032 | rte_eal_using_phys_addrs(void) | |
2033 | { | |
2034 | return phys_addrs_available; | |
2035 | } | |
2036 | ||
2037 | static int __rte_unused | |
2038 | memseg_primary_init_32(void) | |
2039 | { | |
2040 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
2041 | int active_sockets, hpi_idx, msl_idx = 0; | |
2042 | unsigned int socket_id, i; | |
2043 | struct rte_memseg_list *msl; | |
2044 | uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem; | |
2045 | uint64_t max_mem; | |
2046 | ||
2047 | /* no-huge does not need this at all */ | |
2048 | if (internal_config.no_hugetlbfs) | |
2049 | return 0; | |
2050 | ||
2051 | /* this is a giant hack, but desperate times call for desperate | |
2052 | * measures. in legacy 32-bit mode, we cannot preallocate VA space, | |
2053 | * because having upwards of 2 gigabytes of VA space already mapped will | |
2054 | * interfere with our ability to map and sort hugepages. | |
2055 | * | |
2056 | * therefore, in legacy 32-bit mode, we will be initializing memseg | |
2057 | * lists much later - in eal_memory.c, right after we unmap all the | |
2058 | * unneeded pages. this will not affect secondary processes, as those | |
2059 | * should be able to mmap the space without (too many) problems. | |
2060 | */ | |
2061 | if (internal_config.legacy_mem) | |
2062 | return 0; | |
2063 | ||
2064 | /* 32-bit mode is a very special case. we cannot know in advance where | |
2065 | * the user will want to allocate their memory, so we have to do some | |
2066 | * heuristics. | |
2067 | */ | |
2068 | active_sockets = 0; | |
2069 | total_requested_mem = 0; | |
2070 | if (internal_config.force_sockets) | |
2071 | for (i = 0; i < rte_socket_count(); i++) { | |
2072 | uint64_t mem; | |
2073 | ||
2074 | socket_id = rte_socket_id_by_idx(i); | |
2075 | mem = internal_config.socket_mem[socket_id]; | |
2076 | ||
2077 | if (mem == 0) | |
2078 | continue; | |
2079 | ||
2080 | active_sockets++; | |
2081 | total_requested_mem += mem; | |
2082 | } | |
2083 | else | |
2084 | total_requested_mem = internal_config.memory; | |
2085 | ||
2086 | max_mem = (uint64_t)RTE_MAX_MEM_MB << 20; | |
2087 | if (total_requested_mem > max_mem) { | |
2088 | RTE_LOG(ERR, EAL, "Invalid parameters: a 32-bit process can use at most %uM of memory\n", | |
2089 | (unsigned int)(max_mem >> 20)); | |
2090 | return -1; | |
2091 | } | |
2092 | total_extra_mem = max_mem - total_requested_mem; | |
2093 | extra_mem_per_socket = active_sockets == 0 ? total_extra_mem : | |
2094 | total_extra_mem / active_sockets; | |
2095 | ||
2096 | /* the allocation logic is a little bit convoluted, but here's how it | |
2097 | * works, in a nutshell: | |
2098 | * - if user hasn't specified on which sockets to allocate memory via | |
2099 | * --socket-mem, we allocate all of our memory on master core socket. | |
2100 | * - if user has specified sockets to allocate memory on, there may be | |
2101 | * some "unused" memory left (e.g. if user has specified --socket-mem | |
2102 | * such that not all memory adds up to 2 gigabytes), so add it to all | |
2103 | * sockets that are in use equally. | |
2104 | * | |
2105 | * page sizes are sorted by size in descending order, so we can safely | |
2106 | * assume that we dispense with bigger page sizes first. | |
2107 | */ | |
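| /* for illustration (hypothetical numbers): with a 2G ceiling and | |
| * --socket-mem=512,256, total_requested_mem is 768M, total_extra_mem | |
| * is 1280M, and each of the two active sockets may use an extra 640M | |
| * on top of what it requested. | |
| */ | |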
2108 | ||
2109 | /* create memseg lists */ | |
2110 | for (i = 0; i < rte_socket_count(); i++) { | |
2111 | int hp_sizes = (int) internal_config.num_hugepage_sizes; | |
2112 | uint64_t max_socket_mem, cur_socket_mem; | |
2113 | unsigned int master_lcore_socket; | |
2114 | struct rte_config *cfg = rte_eal_get_configuration(); | |
2115 | bool skip; | |
2116 | ||
2117 | socket_id = rte_socket_id_by_idx(i); | |
2118 | ||
2119 | #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES | |
2120 | if (socket_id > 0) | |
2121 | break; | |
2122 | #endif | |
2123 | ||
2124 | /* if we didn't specifically request memory on this socket */ | |
2125 | skip = active_sockets != 0 && | |
2126 | internal_config.socket_mem[socket_id] == 0; | |
2127 | /* ...or if we didn't specifically request memory on *any* | |
2128 | * socket, and this is not master lcore | |
2129 | */ | |
2130 | master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore); | |
2131 | skip |= active_sockets == 0 && socket_id != master_lcore_socket; | |
2132 | ||
2133 | if (skip) { | |
2134 | RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n", | |
2135 | socket_id); | |
2136 | continue; | |
2137 | } | |
2138 | ||
2139 | /* max amount of memory on this socket */ | |
2140 | max_socket_mem = (active_sockets != 0 ? | |
2141 | internal_config.socket_mem[socket_id] : | |
2142 | internal_config.memory) + | |
2143 | extra_mem_per_socket; | |
2144 | cur_socket_mem = 0; | |
2145 | ||
2146 | for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) { | |
2147 | uint64_t max_pagesz_mem, cur_pagesz_mem = 0; | |
2148 | uint64_t hugepage_sz; | |
2149 | struct hugepage_info *hpi; | |
2150 | int type_msl_idx, max_segs, total_segs = 0; | |
2151 | ||
2152 | hpi = &internal_config.hugepage_info[hpi_idx]; | |
2153 | hugepage_sz = hpi->hugepage_sz; | |
2154 | ||
2155 | /* check if pages are actually available */ | |
2156 | if (hpi->num_pages[socket_id] == 0) | |
2157 | continue; | |
2158 | ||
2159 | max_segs = RTE_MAX_MEMSEG_PER_TYPE; | |
2160 | max_pagesz_mem = max_socket_mem - cur_socket_mem; | |
2161 | ||
2162 | /* make it multiple of page size */ | |
2163 | max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem, | |
2164 | hugepage_sz); | |
2165 | ||
2166 | RTE_LOG(DEBUG, EAL, "Attempting to preallocate " | |
2167 | "%" PRIu64 "M on socket %i\n", | |
2168 | max_pagesz_mem >> 20, socket_id); | |
2169 | ||
2170 | type_msl_idx = 0; | |
2171 | while (cur_pagesz_mem < max_pagesz_mem && | |
2172 | total_segs < max_segs) { | |
2173 | uint64_t cur_mem; | |
2174 | unsigned int n_segs; | |
2175 | ||
2176 | if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { | |
2177 | RTE_LOG(ERR, EAL, | |
2178 | "No more space in memseg lists, please increase %s\n", | |
2179 | RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS)); | |
2180 | return -1; | |
2181 | } | |
2182 | ||
2183 | msl = &mcfg->memsegs[msl_idx]; | |
2184 | ||
2185 | cur_mem = get_mem_amount(hugepage_sz, | |
2186 | max_pagesz_mem); | |
2187 | n_segs = cur_mem / hugepage_sz; | |
2188 | ||
2189 | if (alloc_memseg_list(msl, hugepage_sz, n_segs, | |
2190 | socket_id, type_msl_idx)) { | |
2191 | /* failing to allocate a memseg list is | |
2192 | * a serious error. | |
2193 | */ | |
2194 | RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n"); | |
2195 | return -1; | |
2196 | } | |
2197 | ||
2198 | if (alloc_va_space(msl)) { | |
2199 | /* if we couldn't allocate VA space, we | |
2200 | * can try with smaller page sizes. | |
2201 | */ | |
2202 | RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n"); | |
2203 | /* deallocate memseg list */ | |
2204 | if (free_memseg_list(msl)) | |
2205 | return -1; | |
2206 | break; | |
2207 | } | |
2208 | ||
2209 | total_segs += msl->memseg_arr.len; | |
2210 | cur_pagesz_mem = total_segs * hugepage_sz; | |
2211 | type_msl_idx++; | |
2212 | msl_idx++; | |
2213 | } | |
2214 | cur_socket_mem += cur_pagesz_mem; | |
2215 | } | |
2216 | if (cur_socket_mem == 0) { | |
2217 | RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n", | |
2218 | socket_id); | |
2219 | return -1; | |
2220 | } | |
2221 | } | |
2222 | ||
2223 | return 0; | |
2224 | } | |
2225 | ||
2226 | static int __rte_unused | |
2227 | memseg_primary_init(void) | |
2228 | { | |
2229 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
2230 | int i, socket_id, hpi_idx, msl_idx = 0; | |
2231 | struct rte_memseg_list *msl; | |
2232 | uint64_t max_mem, total_mem; | |
2233 | ||
2234 | /* no-huge does not need this at all */ | |
2235 | if (internal_config.no_hugetlbfs) | |
2236 | return 0; | |
2237 | ||
2238 | max_mem = (uint64_t)RTE_MAX_MEM_MB << 20; | |
2239 | total_mem = 0; | |
2240 | ||
2241 | /* create memseg lists */ | |
2242 | for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes; | |
2243 | hpi_idx++) { | |
2244 | struct hugepage_info *hpi; | |
2245 | uint64_t hugepage_sz; | |
2246 | ||
2247 | hpi = &internal_config.hugepage_info[hpi_idx]; | |
2248 | hugepage_sz = hpi->hugepage_sz; | |
2249 | ||
2250 | for (i = 0; i < (int) rte_socket_count(); i++) { | |
2251 | uint64_t max_type_mem, total_type_mem = 0; | |
2252 | int type_msl_idx, max_segs, total_segs = 0; | |
2253 | ||
2254 | socket_id = rte_socket_id_by_idx(i); | |
2255 | ||
2256 | #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES | |
2257 | if (socket_id > 0) | |
2258 | break; | |
2259 | #endif | |
2260 | ||
2261 | if (total_mem >= max_mem) | |
2262 | break; | |
2263 | ||
2264 | max_type_mem = RTE_MIN(max_mem - total_mem, | |
2265 | (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20); | |
2266 | max_segs = RTE_MAX_MEMSEG_PER_TYPE; | |
2267 | ||
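| /* one type (page size + socket) may need several memseg lists; keep | |
| * creating them until this type's memory or segment budget runs out. | |
| */ | |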
2268 | type_msl_idx = 0; | |
2269 | while (total_type_mem < max_type_mem && | |
2270 | total_segs < max_segs) { | |
2271 | uint64_t cur_max_mem, cur_mem; | |
2272 | unsigned int n_segs; | |
2273 | ||
2274 | if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { | |
2275 | RTE_LOG(ERR, EAL, | |
2276 | "No more space in memseg lists, please increase %s\n", | |
2277 | RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS)); | |
2278 | return -1; | |
2279 | } | |
2280 | ||
2281 | msl = &mcfg->memsegs[msl_idx++]; | |
2282 | ||
2283 | cur_max_mem = max_type_mem - total_type_mem; | |
2284 | ||
2285 | cur_mem = get_mem_amount(hugepage_sz, | |
2286 | cur_max_mem); | |
2287 | n_segs = cur_mem / hugepage_sz; | |
2288 | ||
2289 | if (alloc_memseg_list(msl, hugepage_sz, n_segs, | |
2290 | socket_id, type_msl_idx)) | |
2291 | return -1; | |
2292 | ||
2293 | total_segs += msl->memseg_arr.len; | |
2294 | total_type_mem = total_segs * hugepage_sz; | |
2295 | type_msl_idx++; | |
2296 | ||
2297 | if (alloc_va_space(msl)) { | |
2298 | RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n"); | |
2299 | return -1; | |
2300 | } | |
2301 | } | |
2302 | total_mem += total_type_mem; | |
2303 | } | |
2304 | } | |
2305 | return 0; | |
2306 | } | |
2307 | ||
2308 | static int | |
2309 | memseg_secondary_init(void) | |
2310 | { | |
2311 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
2312 | int msl_idx = 0; | |
2313 | struct rte_memseg_list *msl; | |
2314 | ||
2315 | for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) { | |
2316 | ||
2317 | msl = &mcfg->memsegs[msl_idx]; | |
2318 | ||
2319 | /* skip empty memseg lists */ | |
2320 | if (msl->memseg_arr.len == 0) | |
2321 | continue; | |
2322 | ||
2323 | if (rte_fbarray_attach(&msl->memseg_arr)) { | |
2324 | RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n"); | |
2325 | return -1; | |
2326 | } | |
2327 | ||
2328 | /* preallocate VA space */ | |
2329 | if (alloc_va_space(msl)) { | |
2330 | RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n"); | |
2331 | return -1; | |
2332 | } | |
2333 | } | |
2334 | ||
2335 | return 0; | |
2336 | } | |
2337 | ||
2338 | int | |
2339 | rte_eal_memseg_init(void) | |
2340 | { | |
2341 | return rte_eal_process_type() == RTE_PROC_PRIMARY ? | |
2342 | #ifndef RTE_ARCH_64 | |
2343 | memseg_primary_init_32() : | |
2344 | #else | |
2345 | memseg_primary_init() : | |
2346 | #endif | |
2347 | memseg_secondary_init(); | |
2348 | } |