1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![allow(unused_attributes)]
12 #![unstable(feature = "alloc_system",
13 reason
= "this library is unlikely to be stabilized in its current \
16 #![feature(allocator_api)]
17 #![feature(core_intrinsics)]
19 #![feature(staged_api)]
20 #![feature(rustc_attrs)]
21 #![feature(alloc_layout_extra)]
23 all(target_arch
= "wasm32", not(target_os
= "emscripten")),
24 feature(integer_atomics
, stdsimd
)
26 #![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
//
// NOTE(review): this cfg list looks truncated in the source under review —
// other 32-bit targets (e.g. `arm`, `mips`) may also belong here; confirm
// against the full upstream target list.
#[cfg(all(any(target_arch = "x86",
              target_arch = "powerpc",
              target_arch = "powerpc64",
              target_arch = "asmjs",
              target_arch = "wasm32")))]
const MIN_ALIGN: usize = 8;
// On these targets the platform allocator guarantees (at least) 16-byte
// alignment, so requests up to that alignment can take the fast path.
#[cfg(all(any(target_arch = "x86_64",
              target_arch = "aarch64",
              target_arch = "mips64",
              target_arch = "s390x",
              target_arch = "sparc64")))]
const MIN_ALIGN: usize = 16;
46 /// The default memory allocator provided by the operating system.
48 /// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
49 /// plus related functions.
51 /// This type can be used in a `static` item
52 /// with the `#[global_allocator]` attribute
53 /// to force the global allocator to be the system’s one.
54 /// (The default is jemalloc for executables, on some platforms.)
57 /// use std::alloc::System;
59 /// #[global_allocator]
60 /// static A: System = System;
63 /// let a = Box::new(4); // Allocates from the system allocator.
64 /// println!("{}", a);
68 /// It can also be used directly to allocate memory
69 /// independently of the standard library’s global allocator.
70 #[stable(feature = "alloc_system_type", since = "1.28.0")]
72 #[cfg(any(windows, unix, target_os = "redox"))]
73 mod realloc_fallback
{
74 use core
::alloc
::{GlobalAlloc, Layout}
;
78 pub(crate) unsafe fn realloc_fallback(&self, ptr
: *mut u8, old_layout
: Layout
,
79 new_size
: usize) -> *mut u8 {
80 // Docs for GlobalAlloc::realloc require this to be valid:
81 let new_layout
= Layout
::from_size_align_unchecked(new_size
, old_layout
.align());
82 let new_ptr
= GlobalAlloc
::alloc(self, new_layout
);
83 if !new_ptr
.is_null() {
84 let size
= cmp
::min(old_layout
.size(), new_size
);
85 ptr
::copy_nonoverlapping(ptr
, new_ptr
, size
);
86 GlobalAlloc
::dealloc(self, ptr
, old_layout
);
92 #[cfg(any(unix, target_os = "redox"))]
98 use core
::alloc
::{GlobalAlloc, Layout}
;
99 #[stable(feature = "alloc_system_type", since = "1.28.0")]
100 unsafe impl GlobalAlloc
for System
{
102 unsafe fn alloc(&self, layout
: Layout
) -> *mut u8 {
103 if layout
.align() <= MIN_ALIGN
&& layout
.align() <= layout
.size() {
104 libc
::malloc(layout
.size()) as *mut u8
106 #[cfg(target_os = "macos")]
108 if layout
.align() > (1 << 31) {
109 return ptr
::null_mut()
112 aligned_malloc(&layout
)
116 unsafe fn alloc_zeroed(&self, layout
: Layout
) -> *mut u8 {
117 if layout
.align() <= MIN_ALIGN
&& layout
.align() <= layout
.size() {
118 libc
::calloc(layout
.size(), 1) as *mut u8
120 let ptr
= self.alloc(layout
.clone());
122 ptr
::write_bytes(ptr
, 0, layout
.size());
128 unsafe fn dealloc(&self, ptr
: *mut u8, _layout
: Layout
) {
129 libc
::free(ptr
as *mut libc
::c_void
)
132 unsafe fn realloc(&self, ptr
: *mut u8, layout
: Layout
, new_size
: usize) -> *mut u8 {
133 if layout
.align() <= MIN_ALIGN
&& layout
.align() <= new_size
{
134 libc
::realloc(ptr
as *mut libc
::c_void
, new_size
) as *mut u8
136 self.realloc_fallback(ptr
, layout
, new_size
)
140 #[cfg(any(target_os = "android",
141 target_os
= "hermit",
143 target_os
= "solaris"))]
145 unsafe fn aligned_malloc(layout
: &Layout
) -> *mut u8 {
146 // On android we currently target API level 9 which unfortunately
147 // doesn't have the `posix_memalign` API used below. Instead we use
148 // `memalign`, but this unfortunately has the property on some systems
149 // where the memory returned cannot be deallocated by `free`!
151 // Upon closer inspection, however, this appears to work just fine with
152 // Android, so for this platform we should be fine to call `memalign`
153 // (which is present in API level 9). Some helpful references could
154 // possibly be chromium using memalign [1], attempts at documenting that
155 // memalign + free is ok [2] [3], or the current source of chromium
156 // which still uses memalign on android [4].
158 // [1]: https://codereview.chromium.org/10796020/
159 // [2]: https://code.google.com/p/android/issues/detail?id=35391
160 // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
161 // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
162 // /memory/aligned_memory.cc
163 libc
::memalign(layout
.align(), layout
.size()) as *mut u8
165 #[cfg(not(any(target_os = "android",
166 target_os
= "hermit",
168 target_os
= "solaris")))]
170 unsafe fn aligned_malloc(layout
: &Layout
) -> *mut u8 {
171 let mut out
= ptr
::null_mut();
172 let ret
= libc
::posix_memalign(&mut out
, layout
.align(), layout
.size());
// Windows implementation: allocate from the process heap via `HeapAlloc` and
// friends. Over-aligned requests over-allocate and stash the original pointer
// in a `Header` immediately before the aligned address so that `dealloc` can
// recover it.
#[cfg(windows)]
#[allow(nonstandard_style)]
mod platform {
    use MIN_ALIGN;
    use System;
    use core::alloc::{GlobalAlloc, Layout};

    // Minimal hand-rolled Win32 bindings; this crate avoids external deps.
    type LPVOID = *mut u8;
    type HANDLE = LPVOID;
    type SIZE_T = usize;
    type DWORD = u32;
    type BOOL = i32;

    extern "system" {
        fn GetProcessHeap() -> HANDLE;
        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
        fn GetLastError() -> DWORD;
    }

    // Holds the pointer actually returned by `HeapAlloc`, stored in the slot
    // immediately before the aligned pointer handed to the caller.
    struct Header(*mut u8);

    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;

    // Recover the `Header` stored just below an over-aligned allocation.
    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
        &mut *(ptr as *mut Header).offset(-1)
    }

    // Bump `ptr` up to `align` (a power of two) and record the original
    // pointer in the header slot just below the aligned address.
    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
        let aligned = ptr.add(align - (ptr as usize & (align - 1)));
        *get_header(aligned) = Header(ptr);
        aligned
    }

    #[inline]
    unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
        let ptr = if layout.align() <= MIN_ALIGN {
            // The heap's natural alignment suffices.
            HeapAlloc(GetProcessHeap(), flags, layout.size())
        } else {
            // Over-allocate by `align` bytes: that always leaves room both to
            // align the pointer and to store the `Header` before it.
            let size = layout.size() + layout.align();
            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
            if ptr.is_null() {
                ptr
            } else {
                align_ptr(ptr, layout.align())
            }
        };
        ptr as *mut u8
    }

    #[stable(feature = "alloc_system_type", since = "1.28.0")]
    unsafe impl GlobalAlloc for System {
        #[inline]
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            allocate_with_flags(layout, 0)
        }

        #[inline]
        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
            allocate_with_flags(layout, HEAP_ZERO_MEMORY)
        }

        #[inline]
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            if layout.align() <= MIN_ALIGN {
                let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
                debug_assert!(err != 0, "Failed to free heap memory: {}",
                              GetLastError());
            } else {
                // Free the original (pre-alignment) pointer from the header.
                let header = get_header(ptr);
                let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
                debug_assert!(err != 0, "Failed to free heap memory: {}",
                              GetLastError());
            }
        }

        #[inline]
        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
            if layout.align() <= MIN_ALIGN {
                HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
            } else {
                // `HeapReAlloc` would not preserve the manual over-alignment;
                // use the shared alloc+copy+free fallback instead.
                self.realloc_fallback(ptr, layout, new_size)
            }
        }
    }
}
// This is an implementation of a global allocator on the wasm32 platform when
// emscripten is not in use. In that situation there's no actual runtime for us
// to lean on for allocation, so instead we provide our own!
//
// The wasm32 instruction set has two instructions for getting the current
// amount of memory and growing the amount of memory. These instructions are the
// foundation on which we're able to build an allocator, so we do so! Note that
// the instructions are also pretty "global" and this is the "global" allocator
// after all!
//
// The current allocator here is the `dlmalloc` crate which we've got included
// in the rust-lang/rust repository as a submodule. The crate is a port of
// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
// for now which is currently technically required (can't link with C yet).
//
// The crate itself provides a global allocator which on wasm has no
// synchronization as there are no threads!
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
mod platform {
    extern crate dlmalloc;

    use core::alloc::{GlobalAlloc, Layout};
    use System;

    // Single global dlmalloc instance; every access must hold `lock::lock()`.
    static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT;

    #[stable(feature = "alloc_system_type", since = "1.28.0")]
    unsafe impl GlobalAlloc for System {
        #[inline]
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            let _lock = lock::lock();
            DLMALLOC.malloc(layout.size(), layout.align())
        }

        #[inline]
        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
            let _lock = lock::lock();
            DLMALLOC.calloc(layout.size(), layout.align())
        }

        #[inline]
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            let _lock = lock::lock();
            DLMALLOC.free(ptr, layout.size(), layout.align())
        }

        #[inline]
        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
            let _lock = lock::lock();
            DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size)
        }
    }

    // RAII-style lock guarding `DLMALLOC` when the wasm `atomics` feature (and
    // therefore threads) is enabled: acquire via atomic swap, block on
    // `wait_i32` while contended, release + `wake` on drop.
    #[cfg(target_feature = "atomics")]
    mod lock {
        use core::arch::wasm32;
        use core::sync::atomic::{AtomicI32, Ordering::SeqCst};

        static LOCKED: AtomicI32 = AtomicI32::new(0);

        pub struct DropLock;

        pub fn lock() -> DropLock {
            loop {
                // A 0 -> 1 transition means we acquired the lock.
                if LOCKED.swap(1, SeqCst) == 0 {
                    return DropLock
                }
                // Otherwise sleep until the holder wakes us, then retry.
                unsafe {
                    let r = wasm32::atomic::wait_i32(
                        &LOCKED as *const AtomicI32 as *mut i32,
                        1,  // expected value: still locked
                        -1, // timeout: infinite
                    );
                    // 0 = woken by `wake`, 1 = value changed before sleeping;
                    // either way we loop and retry the swap.
                    debug_assert!(r == 0 || r == 1);
                }
            }
        }

        impl Drop for DropLock {
            fn drop(&mut self) {
                // Release the lock; it must have been held (value 1).
                let r = LOCKED.swap(0, SeqCst);
                debug_assert_eq!(r, 1);
                unsafe {
                    wasm32::atomic::wake(
                        &LOCKED as *const AtomicI32 as *mut i32,
                        1, // only one thread
                    );
                }
            }
        }
    }

    // Without atomics there are no threads, so locking is a no-op.
    #[cfg(not(target_feature = "atomics"))]
    mod lock {
        #[inline]
        pub fn lock() {} // no atomics, no threads, that's easy!
    }
}