// wasi-libc: libc-bottom-half/cloudlibc/src/include/stdlib.h
// Copyright (c) 2015-2017 Nuxi, https://nuxi.nl/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
// <stdlib.h> - standard library definitions
//
// Extensions:
// - MB_CUR_MAX_L(), mblen_l(), mbstowcs_l(), mbtowc_l(), wcstombs_l():
//   Regular functions always use the C locale. Available on many other
//   operating systems.
// - alloca():
//   Present on most other operating systems.
// - arc4random(), arc4random_buf() and arc4random_uniform():
//   Secure random number generator. Available on many other operating
//   systems.
// - l64a_r():
//   Thread-safe replacement for l64a(). Part of the SVID, 4th edition.
// - qsort_r():
//   Available on many other operating systems, although the prototype
//   is not consistent. This implementation is compatible with glibc.
//   It is expected that this version will be standardized in the future.
// - reallocarray():
//   Allows for reallocation of buffers without integer overflows.
//
// Features missing:
// - initstate(), lcong48(), seed48(), setstate(), srand(), srand48()
//   and srandom():
//   Randomizer is seeded securely by default. There is no need to seed
//   manually.
// - WEXITSTATUS(), WIFEXITED(), WIFSIGNALED(), WIFSTOPPED(), WNOHANG,
//   WSTOPSIG(), WTERMSIG(), WUNTRACED:
//   Only useful if system() would actually work.
// - l64a():
//   Not thread-safe. Use l64a_r() instead.
// - putenv(), setenv() and unsetenv():
//   Environment variables are not available.
// - grantpt(), posix_openpt(), ptsname() and unlockpt():
//   Pseudo-terminals are not available.
// - mkdtemp(), mkstemp() and realpath():
//   Requires global filesystem namespace.
// - setkey():
//   Password database and encryption schemes not available.
// - system():
//   Requires a command shell.
73 _Noreturn
void _Exit(int);
74 _Noreturn
void abort(void);
75 void *calloc(size_t, size_t);
76 _Noreturn
void exit(int);
79 void qsort(void *, size_t, size_t, int (*)(const void *, const void *));
80 void *realloc(void *, size_t);
83 #if _CLOUDLIBC_INLINE_FUNCTIONS
85 // qsort_r() implementation from Bentley and McIlroy's
86 // "Engineering a Sort Function".
88 // This sorting function is inlined into this header, so that the
89 // compiler can create an optimized version that takes the alignment and
90 // size of the elements into account. It also reduces the overhead of
91 // indirect function calls.
93 static __inline
void __qsort_r(void *, size_t, size_t,
94 int (*)(const void *, const void *, void *),
97 static __inline
size_t __qsort_min(size_t __a
, size_t __b
) {
98 return __a
< __b
? __a
: __b
;
101 // Swaps the contents of two buffers.
102 static __inline
void __qsort_swap(char *__a
, char *__b
, size_t __n
) {
112 // Implementation of insertionsort for small lists.
113 static __inline
void __qsort_insertionsort(
114 char *__a
, size_t __nel
, size_t __width
,
115 int (*__cmp
)(const void *, const void *, void *), void *__thunk
) {
118 for (__pm
= __a
+ __width
; __pm
< __a
+ __nel
* __width
; __pm
+= __width
)
119 for (__pl
= __pm
; __pl
> __a
&& __cmp(__pl
- __width
, __pl
, __thunk
) > 0;
121 __qsort_swap(__pl
, __pl
- __width
, __width
);
124 // Returns the median of three elements.
125 static __inline
char *__qsort_med3(char *__a
, char *__b
, char *__c
,
126 int (*__cmp
)(const void *, const void *,
129 return __cmp(__a
, __b
, __thunk
) < 0
130 ? (__cmp(__b
, __c
, __thunk
) < 0
132 : __cmp(__a
, __c
, __thunk
) < 0 ? __c
: __a
)
133 : (__cmp(__b
, __c
, __thunk
) > 0
135 : __cmp(__a
, __c
, __thunk
) > 0 ? __c
: __a
);
138 // Picks a pivot based on a pseudo-median of three or nine.
139 // TODO(ed): Does this still guarantee an O(n log n) running time?
140 static __inline
char *__qsort_pickpivot(char *__a
, size_t __nel
, size_t __width
,
141 int (*__cmp
)(const void *, const void *,
144 char *__pl
, *__pm
, *__pn
;
148 __pm
= __a
+ (__nel
/ 2) * __width
;
149 __pn
= __a
+ (__nel
- 1) * __width
;
151 __s
= (__nel
/ 8) * __width
;
152 __pl
= __qsort_med3(__pl
, __pl
+ __s
, __pl
+ 2 * __s
, __cmp
, __thunk
);
153 __pm
= __qsort_med3(__pm
- __s
, __pm
, __pm
+ __s
, __cmp
, __thunk
);
154 __pn
= __qsort_med3(__pn
- 2 * __s
, __pn
- __s
, __pn
, __cmp
, __thunk
);
156 return __qsort_med3(__pl
, __pm
, __pn
, __cmp
, __thunk
);
159 // Implementation of quicksort for larger lists.
160 static __inline
void __qsort_quicksort(char *__a
, size_t __nel
, size_t __width
,
161 int (*__cmp
)(const void *, const void *,
164 char *__pa
, *__pb
, *__pc
, *__pd
, *__pn
;
168 // Select pivot and move it to the head of the list.
169 __qsort_swap(__a
, __qsort_pickpivot(__a
, __nel
, __width
, __cmp
, __thunk
),
172 // Perform partitioning.
174 __pc
= __pd
= __a
+ (__nel
- 1) * __width
;
176 while (__pb
<= __pc
&& (__r
= __cmp(__pb
, __a
, __thunk
)) <= 0) {
178 __qsort_swap(__pa
, __pb
, __width
);
183 while (__pc
>= __pb
&& (__r
= __cmp(__pc
, __a
, __thunk
)) >= 0) {
185 __qsort_swap(__pc
, __pd
, __width
);
192 __qsort_swap(__pb
, __pc
, __width
);
197 // Store pivot between the two partitions.
198 __pn
= __a
+ __nel
* __width
;
199 __s
= __qsort_min((size_t)(__pa
- __a
), (size_t)(__pb
- __pa
));
200 __qsort_swap(__a
, __pb
- __s
, __s
);
201 __s
= __qsort_min((size_t)(__pd
- __pc
), (size_t)(__pn
- __pd
) - __width
);
202 __qsort_swap(__pb
, __pn
- __s
, __s
);
204 // Sort the two partitions.
205 __s
= (size_t)(__pb
- __pa
);
206 __qsort_r(__a
, __s
/ __width
, __width
, __cmp
, __thunk
);
207 __s
= (size_t)(__pd
- __pc
);
208 __qsort_r(__pn
- __s
, __s
/ __width
, __width
, __cmp
, __thunk
);
211 static __inline
void __qsort_r(void *__base
, size_t __nel
, size_t __width
,
212 int (*__cmp
)(const void *, const void *, void *),
216 __a
= (char *)__base
;
218 __qsort_insertionsort(__a
, __nel
, __width
, __cmp
, __thunk
);
220 __qsort_quicksort(__a
, __nel
, __width
, __cmp
, __thunk
);
223 #define qsort_r(base, nel, width, compar, thunk) \
224 __qsort_r(base, nel, width, compar, thunk)
226 // qsort(): Call into qsort_r(), providing the callback as the thunk.
227 // We assume that the optimizer is smart enough to simplify.
229 static __inline
int __qsort_cmp(const void *__a
, const void *__b
,
231 return ((int (*)(const void *, const void *))__thunk
)(__a
, __b
);
234 static __inline
void __qsort(void *__base
, size_t __nel
, size_t __width
,
235 int (*__cmp
)(const void *, const void *)) {
236 qsort_r(__base
, __nel
, __width
, __qsort_cmp
, (void *)__cmp
);
238 #define qsort(base, nel, width, compar) __qsort(base, nel, width, compar)