]> git.proxmox.com Git - wasi-libc.git/blame - libc-bottom-half/cloudlibc/src/include/stdlib.h
Update README and add CI-tests for minimal supported LLVM-version (10) (#302)
[wasi-libc.git] / libc-bottom-half / cloudlibc / src / include / stdlib.h
CommitLineData
320054e8
DG
1// Copyright (c) 2015-2017 Nuxi, https://nuxi.nl/
2//
3// Redistribution and use in source and binary forms, with or without
4// modification, are permitted provided that the following conditions
5// are met:
6// 1. Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// 2. Redistributions in binary form must reproduce the above copyright
9// notice, this list of conditions and the following disclaimer in the
10// documentation and/or other materials provided with the distribution.
11//
12// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
13// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
16// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
17// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
18// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
20// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
21// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
22// SUCH DAMAGE.
23
24// <stdlib.h> - standard library definitions
25//
26// Extensions:
27// - MB_CUR_MAX_L(), mblen_l(), mbstowcs_l(), mbtowc_l(), wcstombs_l()
28// and wctomb_l():
29// Regular functions always use the C locale. Available on many other
30// operating systems.
31// - alloca():
32// Present on most other operating systems.
33// - arc4random(), arc4random_buf() and arc4random_uniform():
34// Secure random number generator. Available on many other operating
35// systems.
36// - l64a_r():
37// Thread-safe replacement for l64a(). Part of the SVID, 4th edition.
38// - qsort_r():
39// Available on many other operating systems, although the prototype
40// is not consistent. This implementation is compatible with glibc.
41// It is expected that this version will be standardized in the future.
42// - reallocarray():
43// Allows for reallocation of buffers without integer overflows.
44//
45// Features missing:
46// - initstate(), lcong48(), seed48(), setstate(), srand(), srand48()
47// and srandom():
48// Randomizer is seeded securely by default. There is no need to seed
49// manually.
50// - WEXITSTATUS(), WIFEXITED(), WIFSIGNALED(), WIFSTOPPED(), WNOHANG,
51// WSTOPSIG(), WTERMSIG(), WUNTRACED:
52// Only useful if system() would actually work.
53// - l64a():
54// Not thread-safe. Use l64a_r() instead.
55// - putenv(), setenv() and unsetenv():
56// Environment variables are not available.
57// - grantpt(), posix_openpt(), ptsname() and unlockpt():
58// Pseudo-terminals are not available.
59// - mkdtemp(), mkstemp() and realpath():
60// Requires global filesystem namespace.
61// - setkey():
62// Password database and encryption schemes not available.
63// - system():
64// Requires a command shell.
65
66#ifndef _STDLIB_H_
67#define _STDLIB_H_
68
69#include <_/limits.h>
70#include <_/types.h>
71
320054e8
DG
72__BEGIN_DECLS
73_Noreturn void _Exit(int);
320054e8 74_Noreturn void abort(void);
320054e8 75void *calloc(size_t, size_t);
614d783e 76_Noreturn void exit(int);
320054e8 77void free(void *);
320054e8 78void *malloc(size_t);
320054e8 79void qsort(void *, size_t, size_t, int (*)(const void *, const void *));
320054e8 80void *realloc(void *, size_t);
afbf94c3 81__END_DECLS
320054e8
DG
82
83#if _CLOUDLIBC_INLINE_FUNCTIONS
320054e8
DG
84
85// qsort_r() implementation from Bentley and McIlroy's
86// "Engineering a Sort Function".
87//
88// This sorting function is inlined into this header, so that the
89// compiler can create an optimized version that takes the alignment and
90// size of the elements into account. It also reduces the overhead of
91// indirect function calls.
92
// Forward declaration: __qsort_r() is defined below, after its helpers,
// but the recursive quicksort needs to call back into it.
static __inline void __qsort_r(void *, size_t, size_t,
                               int (*)(const void *, const void *, void *),
                               void *);

// Returns the smaller of two size values.
static __inline size_t __qsort_min(size_t __x, size_t __y) {
  return __y < __x ? __y : __x;
}
100
// Exchanges the contents of two equally sized, non-overlapping buffers,
// one byte at a time.
static __inline void __qsort_swap(char *__a, char *__b, size_t __n) {
  size_t __i;
  char __tmp;

  for (__i = 0; __i < __n; ++__i) {
    __tmp = __a[__i];
    __a[__i] = __b[__i];
    __b[__i] = __tmp;
  }
}
111
// Implementation of insertionsort for small lists.
//
// Walks the array left to right; each element is bubbled backwards with
// pairwise swaps until the prefix before it is ordered.
static __inline void __qsort_insertionsort(
    char *__a, size_t __nel, size_t __width,
    int (*__cmp)(const void *, const void *, void *), void *__thunk) {
  char *__end = __a + __nel * __width;
  char *__cur, *__ins;

  for (__cur = __a + __width; __cur < __end; __cur += __width) {
    __ins = __cur;
    while (__ins > __a && __cmp(__ins - __width, __ins, __thunk) > 0) {
      __qsort_swap(__ins, __ins - __width, __width);
      __ins -= __width;
    }
  }
}
123
// Returns the median of three elements, using at most three comparator
// invocations.
static __inline char *__qsort_med3(char *__a, char *__b, char *__c,
                                   int (*__cmp)(const void *, const void *,
                                                void *),
                                   void *__thunk) {
  if (__cmp(__a, __b, __thunk) < 0) {
    // Order so far: a < b.
    if (__cmp(__b, __c, __thunk) < 0)
      return __b;
    return __cmp(__a, __c, __thunk) < 0 ? __c : __a;
  }
  // Order so far: b <= a.
  if (__cmp(__b, __c, __thunk) > 0)
    return __b;
  return __cmp(__a, __c, __thunk) > 0 ? __c : __a;
}
137
// Picks a pivot based on a pseudo-median of three or nine.
// TODO(ed): Does this still guarantee an O(n log n) running time?
static __inline char *__qsort_pickpivot(char *__a, size_t __nel, size_t __width,
                                        int (*__cmp)(const void *, const void *,
                                                     void *),
                                        void *__thunk) {
  // Start with the first, middle and last elements as candidates.
  char *__lo = __a;
  char *__mid = __a + (__nel / 2) * __width;
  char *__hi = __a + (__nel - 1) * __width;

  if (__nel > 40) {
    // Large array: refine each candidate to the median of three elements
    // spaced __nel/8 apart, yielding a pseudo-median of nine overall.
    size_t __d = (__nel / 8) * __width;
    __lo = __qsort_med3(__lo, __lo + __d, __lo + 2 * __d, __cmp, __thunk);
    __mid = __qsort_med3(__mid - __d, __mid, __mid + __d, __cmp, __thunk);
    __hi = __qsort_med3(__hi - 2 * __d, __hi - __d, __hi, __cmp, __thunk);
  }
  return __qsort_med3(__lo, __mid, __hi, __cmp, __thunk);
}
158
// Implementation of quicksort for larger lists.
//
// Uses the Bentley-McIlroy three-way ("fat pivot") partitioning scheme:
// elements comparing equal to the pivot are parked at both ends of the
// array during partitioning and swapped into the middle afterwards, so
// runs of duplicate keys are not recursed into.
static __inline void __qsort_quicksort(char *__a, size_t __nel, size_t __width,
                                       int (*__cmp)(const void *, const void *,
                                                    void *),
                                       void *__thunk) {
  char *__pa, *__pb, *__pc, *__pd, *__pn;
  int __r;
  size_t __s;

  // Select pivot and move it to the head of the list.
  __qsort_swap(__a, __qsort_pickpivot(__a, __nel, __width, __cmp, __thunk),
               __width);

  // Perform partitioning. Loop invariant (pivot lives at __a):
  //   [__a,  __pa)  equal to pivot (left pile)
  //   [__pa, __pb)  less than pivot
  //   [__pb, __pc]  not yet examined
  //   (__pc, __pd]  greater than pivot
  //   (__pd, end)   equal to pivot (right pile)
  __pa = __pb = __a;
  __pc = __pd = __a + (__nel - 1) * __width;
  for (;;) {
    // Advance __pb over elements <= pivot; equals are moved to the left pile.
    while (__pb <= __pc && (__r = __cmp(__pb, __a, __thunk)) <= 0) {
      if (__r == 0) {
        __qsort_swap(__pa, __pb, __width);
        __pa += __width;
      }
      __pb += __width;
    }
    // Retreat __pc over elements >= pivot; equals are moved to the right pile.
    while (__pc >= __pb && (__r = __cmp(__pc, __a, __thunk)) >= 0) {
      if (__r == 0) {
        __qsort_swap(__pc, __pd, __width);
        __pd -= __width;
      }
      __pc -= __width;
    }
    if (__pb > __pc)
      break;
    // *__pb > pivot and *__pc < pivot: exchange them and keep scanning.
    __qsort_swap(__pb, __pc, __width);
    __pb += __width;
    __pc -= __width;
  }

  // Store pivot between the two partitions: swap the two "equal" piles
  // from the array edges into the middle.
  __pn = __a + __nel * __width;
  __s = __qsort_min((size_t)(__pa - __a), (size_t)(__pb - __pa));
  __qsort_swap(__a, __pb - __s, __s);
  __s = __qsort_min((size_t)(__pd - __pc), (size_t)(__pn - __pd) - __width);
  __qsort_swap(__pb, __pn - __s, __s);

  // Sort the two strict partitions (< pivot and > pivot); the equal run
  // in the middle is already in its final position.
  __s = (size_t)(__pb - __pa);
  __qsort_r(__a, __s / __width, __width, __cmp, __thunk);
  __s = (size_t)(__pd - __pc);
  __qsort_r(__pn - __s, __s / __width, __width, __cmp, __thunk);
}
210
// Sorts an array of __nel elements of __width bytes each, dispatching to
// insertionsort for tiny inputs (lower constant overhead) and to the
// three-way quicksort for everything else.
static __inline void __qsort_r(void *__base, size_t __nel, size_t __width,
                               int (*__cmp)(const void *, const void *, void *),
                               void *__thunk) {
  char *__arr = (char *)__base;

  if (__nel >= 8) {
    __qsort_quicksort(__arr, __nel, __width, __cmp, __thunk);
  } else {
    __qsort_insertionsort(__arr, __nel, __width, __cmp, __thunk);
  }
}
#define qsort_r(base, nel, width, compar, thunk) \
  __qsort_r(base, nel, width, compar, thunk)
225
// qsort(): forward to qsort_r(), smuggling the user's two-argument
// comparator through the thunk pointer. We assume the optimizer is smart
// enough to flatten the indirection.

static __inline int __qsort_cmp(const void *__l, const void *__r,
                                void *__thunk) {
  // The thunk is really the caller's qsort()-style comparator.
  int (*__user_cmp)(const void *, const void *) =
      (int (*)(const void *, const void *))__thunk;
  return __user_cmp(__l, __r);
}

static __inline void __qsort(void *__base, size_t __nel, size_t __width,
                             int (*__cmp)(const void *, const void *)) {
  qsort_r(__base, __nel, __width, __qsort_cmp, (void *)__cmp);
}
#define qsort(base, nel, width, compar) __qsort(base, nel, width, compar)
239#endif
240
241#endif