]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2019 Intel Corporation | |
7c673cae FG |
3 | */ |
4 | ||
5 | #include <stdint.h> | |
6 | #include <stddef.h> | |
7 | #include <stdio.h> | |
8 | #include <string.h> | |
9 | #include <sys/queue.h> | |
10 | ||
9f95a23c | 11 | #include <rte_errno.h> |
7c673cae FG |
12 | #include <rte_memcpy.h> |
13 | #include <rte_memory.h> | |
14 | #include <rte_eal.h> | |
15 | #include <rte_eal_memconfig.h> | |
16 | #include <rte_branch_prediction.h> | |
17 | #include <rte_debug.h> | |
18 | #include <rte_launch.h> | |
19 | #include <rte_per_lcore.h> | |
20 | #include <rte_lcore.h> | |
21 | #include <rte_common.h> | |
22 | #include <rte_spinlock.h> | |
23 | ||
24 | #include <rte_malloc.h> | |
25 | #include "malloc_elem.h" | |
26 | #include "malloc_heap.h" | |
9f95a23c | 27 | #include "eal_memalloc.h" |
7c673cae FG |
28 | |
29 | ||
30 | /* Free the memory space back to heap */ | |
31 | void rte_free(void *addr) | |
32 | { | |
33 | if (addr == NULL) return; | |
9f95a23c TL |
34 | if (malloc_heap_free(malloc_elem_from_data(addr)) < 0) |
35 | RTE_LOG(ERR, EAL, "Error: Invalid memory\n"); | |
7c673cae FG |
36 | } |
37 | ||
38 | /* | |
39 | * Allocate memory on specified heap. | |
40 | */ | |
41 | void * | |
9f95a23c TL |
42 | rte_malloc_socket(const char *type, size_t size, unsigned int align, |
43 | int socket_arg) | |
7c673cae | 44 | { |
7c673cae FG |
45 | /* return NULL if size is 0 or alignment is not power-of-2 */ |
46 | if (size == 0 || (align && !rte_is_power_of_2(align))) | |
47 | return NULL; | |
48 | ||
9f95a23c TL |
49 | /* if there are no hugepages and if we are not allocating from an |
50 | * external heap, use memory from any socket available. checking for | |
51 | * socket being external may return -1 in case of invalid socket, but | |
52 | * that's OK - if there are no hugepages, it doesn't matter. | |
53 | */ | |
54 | if (rte_malloc_heap_socket_is_external(socket_arg) != 1 && | |
55 | !rte_eal_has_hugepages()) | |
7c673cae FG |
56 | socket_arg = SOCKET_ID_ANY; |
57 | ||
9f95a23c TL |
58 | return malloc_heap_alloc(type, size, socket_arg, 0, |
59 | align == 0 ? 1 : align, 0, false); | |
7c673cae FG |
60 | } |
61 | ||
62 | /* | |
63 | * Allocate memory on default heap. | |
64 | */ | |
65 | void * | |
66 | rte_malloc(const char *type, size_t size, unsigned align) | |
67 | { | |
68 | return rte_malloc_socket(type, size, align, SOCKET_ID_ANY); | |
69 | } | |
70 | ||
/*
 * Allocate zero'd memory on specified heap.
 */
void *
rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
{
	void *ptr;

	ptr = rte_malloc_socket(type, size, align, socket);
	if (ptr == NULL)
		return NULL;

#ifdef RTE_MALLOC_DEBUG
	/* debug builds poison freed memory, so the new block must be
	 * cleared explicitly; non-debug allocations already come back
	 * zeroed.
	 */
	memset(ptr, 0, size);
#endif
	return ptr;
}
90 | ||
91 | /* | |
92 | * Allocate zero'd memory on default heap. | |
93 | */ | |
94 | void * | |
95 | rte_zmalloc(const char *type, size_t size, unsigned align) | |
96 | { | |
97 | return rte_zmalloc_socket(type, size, align, SOCKET_ID_ANY); | |
98 | } | |
99 | ||
/*
 * Allocate zero'd memory on specified heap, for an array of num elements
 * of size bytes each. Returns NULL on failure or invalid request.
 */
void *
rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket)
{
	/* guard the multiplication: an unchecked num * size can wrap
	 * around size_t and allocate far less than the caller asked for
	 * (CERT MEM07-C).
	 */
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;
	return rte_zmalloc_socket(type, num * size, align, socket);
}
108 | ||
/*
 * Allocate zero'd memory on the default heap, for an array of num elements
 * of size bytes each. Returns NULL on failure or invalid request.
 */
void *
rte_calloc(const char *type, size_t num, size_t size, unsigned align)
{
	/* guard the multiplication: an unchecked num * size can wrap
	 * around size_t and allocate far less than the caller asked for
	 * (CERT MEM07-C).
	 */
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;
	return rte_zmalloc(type, num * size, align);
}
117 | ||
/*
 * Resize allocated memory on specified heap. Tries to grow/shrink the
 * block in place first; otherwise allocates a new block on the requested
 * socket, copies the data over and frees the old block.
 */
void *
rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
{
	/* NULL input behaves like a fresh allocation, per realloc semantics */
	if (ptr == NULL)
		return rte_malloc_socket(NULL, size, align, socket);

	struct malloc_elem *elem = malloc_elem_from_data(ptr);
	if (elem == NULL) {
		RTE_LOG(ERR, EAL, "Error: memory corruption detected\n");
		return NULL;
	}

	/* both size and alignment are rounded up to a cache-line multiple
	 * before any comparison or resize attempt.
	 */
	size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);

	/* check requested socket id and alignment matches first, and if ok,
	 * see if we can resize block
	 */
	if ((socket == SOCKET_ID_ANY ||
	    (unsigned int)socket == elem->heap->socket_id) &&
			RTE_PTR_ALIGN(ptr, align) == ptr &&
			malloc_heap_resize(elem, size) == 0)
		return ptr;

	/* either requested socket id doesn't match, alignment is off
	 * or we have no room to expand,
	 * so move the data.
	 */
	void *new_ptr = rte_malloc_socket(NULL, size, align, socket);
	if (new_ptr == NULL)
		return NULL;
	/* copy only what fits in the smaller of the two blocks; old usable
	 * size is the element size minus its bookkeeping overhead.
	 */
	const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
	rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
	rte_free(ptr);

	return new_ptr;
}
157 | ||
9f95a23c TL |
158 | /* |
159 | * Resize allocated memory. | |
160 | */ | |
161 | void * | |
162 | rte_realloc(void *ptr, size_t size, unsigned int align) | |
163 | { | |
164 | return rte_realloc_socket(ptr, size, align, SOCKET_ID_ANY); | |
165 | } | |
166 | ||
7c673cae FG |
167 | int |
168 | rte_malloc_validate(const void *ptr, size_t *size) | |
169 | { | |
170 | const struct malloc_elem *elem = malloc_elem_from_data(ptr); | |
171 | if (!malloc_elem_cookies_ok(elem)) | |
172 | return -1; | |
173 | if (size != NULL) | |
174 | *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD; | |
175 | return 0; | |
176 | } | |
177 | ||
178 | /* | |
179 | * Function to retrieve data for heap on given socket | |
180 | */ | |
181 | int | |
182 | rte_malloc_get_socket_stats(int socket, | |
183 | struct rte_malloc_socket_stats *socket_stats) | |
184 | { | |
185 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
9f95a23c | 186 | int heap_idx; |
7c673cae | 187 | |
9f95a23c TL |
188 | heap_idx = malloc_socket_to_heap_id(socket); |
189 | if (heap_idx < 0) | |
7c673cae FG |
190 | return -1; |
191 | ||
9f95a23c TL |
192 | return malloc_heap_get_stats(&mcfg->malloc_heaps[heap_idx], |
193 | socket_stats); | |
194 | } | |
195 | ||
196 | /* | |
197 | * Function to dump contents of all heaps | |
198 | */ | |
199 | void __rte_experimental | |
200 | rte_malloc_dump_heaps(FILE *f) | |
201 | { | |
202 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
203 | unsigned int idx; | |
204 | ||
205 | for (idx = 0; idx < RTE_MAX_HEAPS; idx++) { | |
206 | fprintf(f, "Heap id: %u\n", idx); | |
207 | malloc_heap_dump(&mcfg->malloc_heaps[idx], f); | |
208 | } | |
209 | } | |
210 | ||
211 | int | |
212 | rte_malloc_heap_get_socket(const char *name) | |
213 | { | |
214 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
215 | struct malloc_heap *heap = NULL; | |
216 | unsigned int idx; | |
217 | int ret; | |
218 | ||
219 | if (name == NULL || | |
220 | strnlen(name, RTE_HEAP_NAME_MAX_LEN) == 0 || | |
221 | strnlen(name, RTE_HEAP_NAME_MAX_LEN) == | |
222 | RTE_HEAP_NAME_MAX_LEN) { | |
223 | rte_errno = EINVAL; | |
224 | return -1; | |
225 | } | |
226 | rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); | |
227 | for (idx = 0; idx < RTE_MAX_HEAPS; idx++) { | |
228 | struct malloc_heap *tmp = &mcfg->malloc_heaps[idx]; | |
229 | ||
230 | if (!strncmp(name, tmp->name, RTE_HEAP_NAME_MAX_LEN)) { | |
231 | heap = tmp; | |
232 | break; | |
233 | } | |
234 | } | |
235 | ||
236 | if (heap != NULL) { | |
237 | ret = heap->socket_id; | |
238 | } else { | |
239 | rte_errno = ENOENT; | |
240 | ret = -1; | |
241 | } | |
242 | rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); | |
243 | ||
244 | return ret; | |
245 | } | |
246 | ||
247 | int | |
248 | rte_malloc_heap_socket_is_external(int socket_id) | |
249 | { | |
250 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
251 | unsigned int idx; | |
252 | int ret = -1; | |
253 | ||
254 | if (socket_id == SOCKET_ID_ANY) | |
255 | return 0; | |
256 | ||
257 | rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); | |
258 | for (idx = 0; idx < RTE_MAX_HEAPS; idx++) { | |
259 | struct malloc_heap *tmp = &mcfg->malloc_heaps[idx]; | |
260 | ||
261 | if ((int)tmp->socket_id == socket_id) { | |
262 | /* external memory always has large socket ID's */ | |
263 | ret = tmp->socket_id >= RTE_MAX_NUMA_NODES; | |
264 | break; | |
265 | } | |
266 | } | |
267 | rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); | |
268 | ||
269 | return ret; | |
7c673cae FG |
270 | } |
271 | ||
272 | /* | |
273 | * Print stats on memory type. If type is NULL, info on all types is printed | |
274 | */ | |
275 | void | |
276 | rte_malloc_dump_stats(FILE *f, __rte_unused const char *type) | |
277 | { | |
9f95a23c TL |
278 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; |
279 | unsigned int heap_id; | |
7c673cae | 280 | struct rte_malloc_socket_stats sock_stats; |
9f95a23c | 281 | |
7c673cae | 282 | /* Iterate through all initialised heaps */ |
9f95a23c TL |
283 | for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) { |
284 | struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id]; | |
7c673cae | 285 | |
9f95a23c TL |
286 | malloc_heap_get_stats(heap, &sock_stats); |
287 | ||
288 | fprintf(f, "Heap id:%u\n", heap_id); | |
289 | fprintf(f, "\tHeap name:%s\n", heap->name); | |
7c673cae FG |
290 | fprintf(f, "\tHeap_size:%zu,\n", sock_stats.heap_totalsz_bytes); |
291 | fprintf(f, "\tFree_size:%zu,\n", sock_stats.heap_freesz_bytes); | |
292 | fprintf(f, "\tAlloc_size:%zu,\n", sock_stats.heap_allocsz_bytes); | |
293 | fprintf(f, "\tGreatest_free_size:%zu,\n", | |
294 | sock_stats.greatest_free_size); | |
295 | fprintf(f, "\tAlloc_count:%u,\n",sock_stats.alloc_count); | |
296 | fprintf(f, "\tFree_count:%u,\n", sock_stats.free_count); | |
297 | } | |
298 | return; | |
299 | } | |
300 | ||
301 | /* | |
302 | * TODO: Set limit to memory that can be allocated to memory type | |
303 | */ | |
304 | int | |
305 | rte_malloc_set_limit(__rte_unused const char *type, | |
306 | __rte_unused size_t max) | |
307 | { | |
308 | return 0; | |
309 | } | |
310 | ||
311 | /* | |
9f95a23c | 312 | * Return the IO address of a virtual address obtained through rte_malloc |
7c673cae | 313 | */ |
9f95a23c TL |
314 | rte_iova_t |
315 | rte_malloc_virt2iova(const void *addr) | |
7c673cae | 316 | { |
9f95a23c TL |
317 | const struct rte_memseg *ms; |
318 | struct malloc_elem *elem = malloc_elem_from_data(addr); | |
319 | ||
7c673cae | 320 | if (elem == NULL) |
9f95a23c TL |
321 | return RTE_BAD_IOVA; |
322 | ||
323 | if (!elem->msl->external && rte_eal_iova_mode() == RTE_IOVA_VA) | |
324 | return (uintptr_t) addr; | |
325 | ||
326 | ms = rte_mem_virt2memseg(addr, elem->msl); | |
327 | if (ms == NULL) | |
328 | return RTE_BAD_IOVA; | |
329 | ||
330 | if (ms->iova == RTE_BAD_IOVA) | |
331 | return RTE_BAD_IOVA; | |
332 | ||
333 | return ms->iova + RTE_PTR_DIFF(addr, ms->addr); | |
334 | } | |
335 | ||
336 | static struct malloc_heap * | |
337 | find_named_heap(const char *name) | |
338 | { | |
339 | struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; | |
340 | unsigned int i; | |
341 | ||
342 | for (i = 0; i < RTE_MAX_HEAPS; i++) { | |
343 | struct malloc_heap *heap = &mcfg->malloc_heaps[i]; | |
344 | ||
345 | if (!strncmp(name, heap->name, RTE_HEAP_NAME_MAX_LEN)) | |
346 | return heap; | |
347 | } | |
348 | return NULL; | |
349 | } | |
350 | ||
/*
 * Add an externally-owned memory region to a named (external) heap.
 * Returns 0 on success, -1 with rte_errno set on failure:
 *   EINVAL - bad name/address/page size/length parameters
 *   ENOENT - no heap with that name
 *   EPERM  - heap is an internal (per-NUMA-node) heap
 */
int
rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	struct rte_memseg_list *msl;
	unsigned int n;
	int ret;

	/* page_sz must be a power of two, len must be page-aligned, va_addr
	 * must be page-aligned, and if an IOVA table is supplied it must
	 * have exactly one entry per page.
	 */
	if (heap_name == NULL || va_addr == NULL ||
			page_sz == 0 || !rte_is_power_of_2(page_sz) ||
			RTE_ALIGN(len, page_sz) != len ||
			!rte_is_aligned(va_addr, page_sz) ||
			((len / page_sz) != n_pages && iova_addrs != NULL) ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}
	/* write lock: we may create a new memseg list */
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* find our heap */
	heap = find_named_heap(heap_name);
	if (heap == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	if (heap->socket_id < RTE_MAX_NUMA_NODES) {
		/* cannot add memory to internal heaps */
		rte_errno = EPERM;
		ret = -1;
		goto unlock;
	}
	n = len / page_sz;

	msl = malloc_heap_create_external_seg(va_addr, iova_addrs, n, page_sz,
			heap_name, heap->socket_id);
	if (msl == NULL) {
		ret = -1;
		goto unlock;
	}

	/* heap spinlock guards the heap's element lists while we attach
	 * the new segment.
	 */
	rte_spinlock_lock(&heap->lock);
	ret = malloc_heap_add_external_memory(heap, msl);
	rte_spinlock_unlock(&heap->lock);

unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
405 | ||
/*
 * Remove a previously added external memory region from a named heap and
 * destroy its memseg list. Returns 0 on success, -1 with rte_errno set
 * (EINVAL/ENOENT/EPERM as in rte_malloc_heap_memory_add).
 */
int
rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	struct rte_memseg_list *msl;
	int ret;

	/* reject NULL/empty/overlong names, NULL address, zero length */
	if (heap_name == NULL || va_addr == NULL || len == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}
	/* write lock: we will destroy a memseg list on success */
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
	/* find our heap */
	heap = find_named_heap(heap_name);
	if (heap == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	if (heap->socket_id < RTE_MAX_NUMA_NODES) {
		/* cannot remove memory from internal heaps */
		rte_errno = EPERM;
		ret = -1;
		goto unlock;
	}

	/* the (va_addr, len) pair must exactly identify a known external
	 * segment; rte_errno is presumably set by the lookup on failure --
	 * TODO confirm in malloc_heap_find_external_seg().
	 */
	msl = malloc_heap_find_external_seg(va_addr, len);
	if (msl == NULL) {
		ret = -1;
		goto unlock;
	}

	rte_spinlock_lock(&heap->lock);
	ret = malloc_heap_remove_external_memory(heap, va_addr, len);
	rte_spinlock_unlock(&heap->lock);
	if (ret != 0)
		goto unlock;

	/* only destroy the segment once the heap no longer references it */
	ret = malloc_heap_destroy_external_seg(msl);

unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
455 | ||
/*
 * Attach to (or detach from) an external memory segment in a secondary
 * process. On attach, the fbarray is attached first and subscribers are
 * then notified of the new area; on detach, subscribers are notified
 * BEFORE the fbarray is detached, so they can stop using the memory while
 * the mapping is still valid. Returns 0 on success, -1 with rte_errno set.
 */
static int
sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	struct rte_memseg_list *msl;
	int ret;

	/* reject NULL/empty/overlong names, NULL address, zero length */
	if (heap_name == NULL || va_addr == NULL || len == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}
	/* read lock suffices: heap/memseg lists are not modified here */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	/* find our heap */
	heap = find_named_heap(heap_name);
	if (heap == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	/* we shouldn't be able to sync to internal heaps */
	if (heap->socket_id < RTE_MAX_NUMA_NODES) {
		rte_errno = EPERM;
		ret = -1;
		goto unlock;
	}

	/* find corresponding memseg list to sync to */
	msl = malloc_heap_find_external_seg(va_addr, len);
	if (msl == NULL) {
		ret = -1;
		goto unlock;
	}

	if (attach) {
		ret = rte_fbarray_attach(&msl->memseg_arr);
		if (ret == 0) {
			/* notify all subscribers that a new memory area was
			 * added.
			 */
			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
					va_addr, len);
		} else {
			ret = -1;
			goto unlock;
		}
	} else {
		/* notify all subscribers that a memory area is about to
		 * be removed.
		 */
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				msl->base_va, msl->len);
		ret = rte_fbarray_detach(&msl->memseg_arr);
		if (ret < 0) {
			ret = -1;
			goto unlock;
		}
	}
unlock:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
522 | ||
523 | int | |
524 | rte_malloc_heap_memory_attach(const char *heap_name, void *va_addr, size_t len) | |
525 | { | |
526 | return sync_memory(heap_name, va_addr, len, true); | |
527 | } | |
528 | ||
529 | int | |
530 | rte_malloc_heap_memory_detach(const char *heap_name, void *va_addr, size_t len) | |
531 | { | |
532 | return sync_memory(heap_name, va_addr, len, false); | |
533 | } | |
534 | ||
/*
 * Create a new, empty named heap in the first free heap slot. Returns 0
 * on success, -1 with rte_errno set on failure:
 *   EINVAL - bad name
 *   EEXIST - a heap with this name already exists
 *   ENOSPC - all heap slots are in use
 */
int
rte_malloc_heap_create(const char *heap_name)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	int i, ret;

	/* reject NULL/empty/overlong names */
	if (heap_name == NULL ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}
	/* check if there is space in the heap list, or if heap with this name
	 * already exists.
	 */
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	for (i = 0; i < RTE_MAX_HEAPS; i++) {
		struct malloc_heap *tmp = &mcfg->malloc_heaps[i];
		/* existing heap */
		if (strncmp(heap_name, tmp->name,
				RTE_HEAP_NAME_MAX_LEN) == 0) {
			RTE_LOG(ERR, EAL, "Heap %s already exists\n",
				heap_name);
			rte_errno = EEXIST;
			ret = -1;
			goto unlock;
		}
		/* empty heap: remember the first slot whose name is unset */
		if (strnlen(tmp->name, RTE_HEAP_NAME_MAX_LEN) == 0) {
			heap = tmp;
			break;
		}
	}
	if (heap == NULL) {
		RTE_LOG(ERR, EAL, "Cannot create new heap: no space\n");
		rte_errno = ENOSPC;
		ret = -1;
		goto unlock;
	}

	/* we're sure that we can create a new heap, so do it */
	ret = malloc_heap_create(heap, heap_name);
unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
585 | ||
/*
 * Destroy a named external heap. Returns 0 on success, -1 with rte_errno
 * set on failure (EINVAL bad name, ENOENT not found, EPERM internal heap).
 */
int
rte_malloc_heap_destroy(const char *heap_name)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	int ret;

	/* reject NULL/empty/overlong names */
	if (heap_name == NULL ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* start from non-socket heaps */
	heap = find_named_heap(heap_name);
	if (heap == NULL) {
		RTE_LOG(ERR, EAL, "Heap %s not found\n", heap_name);
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	/* we shouldn't be able to destroy internal heaps */
	if (heap->socket_id < RTE_MAX_NUMA_NODES) {
		rte_errno = EPERM;
		ret = -1;
		goto unlock;
	}
	/* sanity checks done, now we can destroy the heap */
	rte_spinlock_lock(&heap->lock);
	ret = malloc_heap_destroy(heap);

	/* if we failed, lock is still active */
	if (ret < 0)
		rte_spinlock_unlock(&heap->lock);
	/* NOTE: on success the spinlock is deliberately NOT unlocked here --
	 * presumably malloc_heap_destroy() wipes the heap, lock included;
	 * verify before changing this.
	 */
unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}