wasi-libc.git: libc-top-half/musl/src/malloc/lite_malloc.c ("Update to musl 1.2.2.")
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "lock.h"
#include "syscall.h"
#include "fork_impl.h"

#define ALIGN 16

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

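/* For illustration (hypothetical addresses): if &libc.auxv were
 * 0x7fff00000000, the guarded interval below it would be
 * [0x7ffeff800000,0x7fff00000000), so a brk move whose old break is
 * below 0x7fff00000000 and whose new break is above 0x7ffeff800000
 * would be rejected. */
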
static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

/* Bump-allocator lock. __bump_lockptr exposes it to the fork code
 * (see fork_impl.h) so the lock can be taken around fork and reset
 * in the child. */
static volatile int lock[1];
volatile int *const __bump_lockptr = lock;

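/* Trivial bump allocator: memory is carved sequentially out of a region
 * obtained from brk (or mmap as a fallback) and is never returned. It is
 * intended to back malloc only while the full allocator's strong
 * __libc_malloc_impl is not linked in (see the weak_alias at the bottom
 * of this file). */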
static void *__simple_malloc(size_t n)
{
	static uintptr_t brk, cur, end;
	static unsigned mmap_step;
	size_t align=1;
	void *p;

	if (n > SIZE_MAX/2) {
		errno = ENOMEM;
		return 0;
	}

	if (!n) n++;
	while (align<n && align<ALIGN)
		align += align;

	LOCK(lock);

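	/* Round cur up to the next multiple of align (a power of two):
	 * -cur & (align-1) is the distance to that boundary. */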
	cur += -cur & align-1;

	if (n > end-cur) {
		size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;

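		/* req is the shortfall rounded up to whole pages. On the
		 * first call, initialize the bump region as an empty range
		 * at the current (page-aligned) program break. */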
		if (!cur) {
			brk = __syscall(SYS_brk, 0);
			brk += -brk & PAGE_SIZE-1;
			cur = end = brk;
		}

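		/* Prefer extending the program break in place while the bump
		 * region still ends at the break; give up if the new break
		 * would overflow, would run into a stack, or if SYS_brk does
		 * not move the break exactly as requested, and fall back to
		 * mmap below. */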
		if (brk == end && req < SIZE_MAX-brk
		    && !traverses_stack_p(brk, brk+req)
		    && __syscall(SYS_brk, brk+req)==brk+req) {
			brk = end += req;
		} else {
			int new_area = 0;
			req = n + PAGE_SIZE-1 & -PAGE_SIZE;
			/* Only make a new area rather than individual mmap
			 * if wasted space would be over 1/8 of the map. */
			if (req-n > req/8) {
				/* Geometric area size growth up to 64 pages,
				 * bounding waste by 1/8 of the area. */
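				/* (min doubles every second increment of
				 * mmap_step; the cap of 12 gives PAGE_SIZE<<6,
				 * i.e. 64 pages, 256 KiB with 4 KiB pages.) */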
				size_t min = PAGE_SIZE<<(mmap_step/2);
				if (min-n > end-cur) {
					if (req < min) {
						req = min;
						if (mmap_step < 12)
							mmap_step++;
					}
					new_area = 1;
				}
			}
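			/* A single mmap serves either as the new bump area
			 * or, when new_area is 0, directly as the allocation
			 * being returned. */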
			void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
			if (mem == MAP_FAILED || !new_area) {
				UNLOCK(lock);
				return mem==MAP_FAILED ? 0 : mem;
			}
			cur = (uintptr_t)mem;
			end = cur + req;
		}
	}

	p = (void *)cur;
	cur += n;
	UNLOCK(lock);
	return p;
}
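
/* malloc is a weak alias for default_malloc, so a program may interpose
 * its own malloc; __libc_malloc_impl is likewise defined weakly here and
 * is expected to be overridden by the full allocator's strong definition
 * when that implementation is linked in. */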
weak_alias(__simple_malloc, __libc_malloc_impl);

void *__libc_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

static void *default_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

weak_alias(default_malloc, malloc);