]>
git.proxmox.com Git - rustc.git/blob - src/libstd/sys/common/stack.rs
fadeebc8150017518a258cec3f653cb9924b6330
1 // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Rust stack-limit management
13 //! Currently Rust uses a segmented-stack-like scheme in order to detect stack
14 //! overflow for rust threads. In this scheme, the prologue of all functions are
15 //! preceded with a check to see whether the current stack limits are being
16 //! exceeded.
18 //! This module provides the functionality necessary in order to manage these
19 //! stack limits (which are stored in platform-specific locations). The
20 //! functions here are used at the borders of the thread lifetime in order to
21 //! manage these limits.
23 //! This module is unstable because this scheme for stack overflow
24 //! detection is not guaranteed to continue in the future. Usage of this module
25 //! is discouraged unless absolutely necessary.
29 // It is possible to implement it using idea from
30 // http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
32 // In short: _pthread_{get,set}_specific_direct allows extremely fast
33 // access, exactly what is required for segmented stack
34 // There is a pool of reserved slots for Apple internal use (0..119)
35 // First dynamic allocated pthread key starts with 257 (on iOS7)
36 // So using slot 149 should be pretty safe ASSUMING space is reserved
37 // for every key < first dynamic key
39 // There is also an opportunity to steal keys reserved for Garbage Collection
40 // ranges 80..89 and 110..119, especially considering the fact Garbage Collection
41 // was never supposed to work on iOS. But, as everybody knows, there is a chance
42 // that those slots will be re-used, like it happened with key 95 (moved from
43 // JavaScriptCore to CoreText)
45 // Unfortunately Apple rejected patch to LLVM which generated
46 // corresponding prolog, decision was taken to disable segmented
47 // stack support on iOS.
/// Amount of extra stack (in bytes) reserved below the recorded stack limit.
///
/// When the limit check fails, `stack_exhausted` moves the limit into this
/// red zone so that the overflow-reporting code itself can run without
/// re-triggering the check.
pub const RED_ZONE: usize = 20 * 1024;
51 /// This function is invoked from rust's current __morestack function. Segmented
52 /// stacks are currently not enabled as segmented stacks, but rather one giant
53 /// stack segment. This means that whenever we run out of stack, we want to
54 /// truly consider it to be stack overflow rather than allocating a new stack.
55 #[cfg(not(test))] // in testing, use the original libstd's version
56 #[lang = "stack_exhausted"]
57 extern fn stack_exhausted() {
61 // We're calling this function because the stack just ran out. We need
62 // to call some other rust functions, but if we invoke the functions
63 // right now it'll just trigger this handler being called again. In
64 // order to alleviate this, we move the stack limit to be inside of the
65 // red zone that was allocated for exactly this reason.
66 let limit
= get_sp_limit();
67 record_sp_limit(limit
- RED_ZONE
/ 2);
69 // This probably isn't the best course of action. Ideally one would want
70 // to unwind the stack here instead of just aborting the entire process.
71 // This is a tricky problem, however. There's a few things which need to
74 // 1. We're here because of a stack overflow, yet unwinding will run
75 // destructors and hence arbitrary code. What if that code overflows
76 // the stack? One possibility is to use the above allocation of an
77 // extra 10k to hope that we don't hit the limit, and if we do then
78 // abort the whole program. Not the best, but kind of hard to deal
79 // with unless we want to switch stacks.
81 // 2. LLVM will optimize functions based on whether they can unwind or
82 // not. It will flag functions with 'nounwind' if it believes that
83 // the function cannot trigger unwinding, but if we do unwind on
84 // stack overflow then it means that we could unwind in any function
85 // anywhere. We would have to make sure that LLVM only places the
86 // nounwind flag on functions which don't call any other functions.
88 // 3. The function that overflowed may have owned arguments. These
89 // arguments need to have their destructors run, but we haven't even
90 // begun executing the function yet, so unwinding will not run the
91 // any landing pads for these functions. If this is ignored, then
92 // the arguments will just be leaked.
94 // Exactly what to do here is a very delicate topic, and is possibly
95 // still up in the air for what exactly to do. Some relevant issues:
97 // #3555 - out-of-stack failure leaks arguments
98 // #3695 - should there be a stack limit?
99 // #9855 - possible strategies which could be taken
100 // #9854 - unwinding on windows through __morestack has never worked
101 // #2361 - possible implementation of not using landing pads
103 ::rt
::util
::report_overflow();
109 // Windows maintains a record of upper and lower stack bounds in the Thread Information
110 // Block (TIB), and some syscalls do check that addresses which are supposed to be in
111 // the stack, indeed lie between these two values.
112 // (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
114 // When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
115 // For OS-managed stacks (libnative), we let the OS manage them for us.
117 // On all other platforms both variants behave identically.
120 pub unsafe fn record_os_managed_stack_bounds(stack_lo
: usize, _stack_hi
: usize) {
121 record_sp_limit(stack_lo
+ RED_ZONE
);
124 /// Records the current limit of the stack as specified by `end`.
126 /// This is stored in an OS-dependent location, likely inside of the thread
127 /// local storage. The location that the limit is stored is a pre-ordained
128 /// location because it's where LLVM has emitted code to check.
130 /// Note that this cannot be called under normal circumstances. This function is
131 /// changing the stack limit, so upon returning any further function calls will
132 /// possibly be triggering the morestack logic if you're not careful.
134 /// Also note that this and all of the inside functions are all flagged as
135 /// "inline(always)" because they're messing around with the stack limits. This
136 /// would be unfortunate for the functions themselves to trigger a morestack
137 /// invocation (if they were an actual function call).
139 pub unsafe fn record_sp_limit(limit
: usize) {
140 return target_record_sp_limit(limit
);
143 #[cfg(all(target_arch = "x86_64",
144 any(target_os
= "macos", target_os
= "ios")))]
146 unsafe fn target_record_sp_limit(limit
: usize) {
147 asm
!("movq $$0x60+90*8, %rsi
148 movq $0, %gs:(%rsi)" :: "r"(limit
) : "rsi" : "volatile")
150 #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
151 unsafe fn target_record_sp_limit(limit
: usize) {
152 asm
!("movq $0, %fs:112" :: "r"(limit
) :: "volatile")
154 #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
155 unsafe fn target_record_sp_limit(_
: usize) {
157 #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
158 unsafe fn target_record_sp_limit(limit
: usize) {
159 asm
!("movq $0, %fs:24" :: "r"(limit
) :: "volatile")
161 #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))]
163 unsafe fn target_record_sp_limit(limit
: usize) {
164 asm
!("movq $0, %fs:32" :: "r"(limit
) :: "volatile")
168 #[cfg(all(target_arch = "x86",
169 any(target_os
= "macos", target_os
= "ios")))]
171 unsafe fn target_record_sp_limit(limit
: usize) {
172 asm
!("movl $$0x48+90*4, %eax
173 movl $0, %gs:(%eax)" :: "r"(limit
) : "eax" : "volatile")
175 #[cfg(all(target_arch = "x86",
176 any(target_os
= "linux", target_os
= "freebsd")))]
178 unsafe fn target_record_sp_limit(limit
: usize) {
179 asm
!("movl $0, %gs:48" :: "r"(limit
) :: "volatile")
181 #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
182 unsafe fn target_record_sp_limit(_
: usize) {
185 // mips, arm - Some brave soul can port these to inline asm, but it's over
186 // my head personally
187 #[cfg(any(target_arch = "mips",
188 target_arch
= "mipsel",
189 all(target_arch
= "arm", not(target_os
= "ios"))))]
191 unsafe fn target_record_sp_limit(limit
: usize) {
193 return record_sp_limit(limit
as *const c_void
);
195 fn record_sp_limit(limit
: *const c_void
);
199 // aarch64 - FIXME(AARCH64): missing...
200 // powerpc - FIXME(POWERPC): missing...
201 // arm-ios - iOS segmented stack is disabled for now, see related notes
202 // openbsd - segmented stack is disabled
203 #[cfg(any(target_arch = "aarch64",
204 target_arch
= "powerpc",
205 all(target_arch
= "arm", target_os
= "ios"),
206 target_os
= "bitrig",
207 target_os
= "openbsd"))]
208 unsafe fn target_record_sp_limit(_
: usize) {
212 /// The counterpart of the function above, this function will fetch the current
213 /// stack limit stored in TLS.
215 /// Note that all of these functions are meant to be exact counterparts of their
216 /// brethren above, except that the operands are reversed.
218 /// As with the setter, this function does not have a __morestack header and can
219 /// therefore be called in a "we're out of stack" situation.
221 pub unsafe fn get_sp_limit() -> usize {
222 return target_get_sp_limit();
225 #[cfg(all(target_arch = "x86_64",
226 any(target_os
= "macos", target_os
= "ios")))]
228 unsafe fn target_get_sp_limit() -> usize {
230 asm
!("movq $$0x60+90*8, %rsi
231 movq %gs:(%rsi), $0" : "=r"(limit
) :: "rsi" : "volatile");
234 #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
235 unsafe fn target_get_sp_limit() -> usize {
237 asm
!("movq %fs:112, $0" : "=r"(limit
) ::: "volatile");
240 #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
241 unsafe fn target_get_sp_limit() -> usize {
244 #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
245 unsafe fn target_get_sp_limit() -> usize {
247 asm
!("movq %fs:24, $0" : "=r"(limit
) ::: "volatile");
250 #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))]
252 unsafe fn target_get_sp_limit() -> usize {
254 asm
!("movq %fs:32, $0" : "=r"(limit
) ::: "volatile");
259 #[cfg(all(target_arch = "x86",
260 any(target_os
= "macos", target_os
= "ios")))]
262 unsafe fn target_get_sp_limit() -> usize {
264 asm
!("movl $$0x48+90*4, %eax
265 movl %gs:(%eax), $0" : "=r"(limit
) :: "eax" : "volatile");
268 #[cfg(all(target_arch = "x86",
269 any(target_os
= "linux", target_os
= "freebsd")))]
271 unsafe fn target_get_sp_limit() -> usize {
273 asm
!("movl %gs:48, $0" : "=r"(limit
) ::: "volatile");
276 #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
277 unsafe fn target_get_sp_limit() -> usize {
281 // mips, arm - Some brave soul can port these to inline asm, but it's over
282 // my head personally
283 #[cfg(any(target_arch = "mips",
284 target_arch
= "mipsel",
285 all(target_arch
= "arm", not(target_os
= "ios"))))]
287 unsafe fn target_get_sp_limit() -> usize {
289 return get_sp_limit() as usize;
291 fn get_sp_limit() -> *const c_void
;
295 // aarch64 - FIXME(AARCH64): missing...
296 // powerpc - FIXME(POWERPC): missing...
297 // arm-ios - iOS doesn't support segmented stacks yet.
298 // openbsd - OpenBSD doesn't support segmented stacks.
300 // This function might be called by runtime though
301 // so it is unsafe to unreachable, let's return a fixed constant.
302 #[cfg(any(target_arch = "aarch64",
303 target_arch
= "powerpc",
304 all(target_arch
= "arm", target_os
= "ios"),
305 target_os
= "bitrig",
306 target_os
= "openbsd"))]
308 unsafe fn target_get_sp_limit() -> usize {