1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Implementation of Rust stack unwinding
13 //! For background on exception handling and stack unwinding please see
14 //! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
15 //! documents linked from it.
16 //! These are also good reads:
17 //! http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
18 //! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
19 //! http://www.airs.com/blog/index.php?s=exception+frames
21 //! ## A brief summary
23 //! Exception handling happens in two phases: a search phase and a cleanup phase.
25 //! In both phases the unwinder walks stack frames from top to bottom using
26 //! information from the stack frame unwind sections of the current process's
//! modules ("module" here refers to an OS module, i.e. an executable or a dynamic library).
30 //! For each stack frame, it invokes the associated "personality routine", whose
31 //! address is also stored in the unwind info section.
//! In the search phase, the job of a personality routine is to examine the exception
//! object being thrown, and to decide whether it should be caught at that stack
//! frame. Once the handler frame has been identified, the cleanup phase begins.
37 //! In the cleanup phase, personality routines invoke cleanup code associated
38 //! with their stack frames (i.e. destructors). Once stack has been unwound down
39 //! to the handler frame level, unwinding stops and the last personality routine
40 //! transfers control to its catch block.
42 //! ## Frame unwind info registration
44 //! Each module has its own frame unwind info section (usually ".eh_frame"), and
//! the unwinder needs to know about all of them in order for unwinding to be able to
//! cross module boundaries.
48 //! On some platforms, like Linux, this is achieved by dynamically enumerating
49 //! currently loaded modules via the dl_iterate_phdr() API and finding all
50 //! .eh_frame sections.
52 //! Others, like Windows, require modules to actively register their unwind info
53 //! sections by calling __register_frame_info() API at startup. In the latter
54 //! case it is essential that there is only one copy of the unwinder runtime in
55 //! the process. This is usually achieved by linking to the dynamic version of
56 //! the unwind runtime.
//! Currently Rust uses the unwind runtime provided by libgcc.
61 #![allow(unused_imports)]
74 use sync
::atomic
::{self, Ordering}
;
75 use sys_common
::mutex
::Mutex
;
77 // The actual unwinding implementation is cfg'd here, and we've got two current
78 // implementations. One goes through SEH on Windows and the other goes through
79 // libgcc via the libunwind-like API.
80 #[cfg(target_env = "msvc")] #[path = "seh.rs"] #[doc(hidden)]
82 #[cfg(not(target_env = "msvc"))] #[path = "gcc.rs"] #[doc(hidden)]
/// Signature of a user-registered panic callback (see `register`): the
/// callback receives the panic payload plus the file name and line number
/// of the panic site.
pub type Callback = fn(msg: &(dyn Any + Send), file: &'static str, line: u32);
// Variables used for invoking callbacks when a thread starts to unwind.
//
// For more information, see below.

/// Upper bound on the number of panic callbacks that may ever be registered.
const MAX_CALLBACKS: usize = 16;

/// An empty callback slot. Callbacks are stored as raw `usize`s (zero means
/// "vacant") and are transmuted back to `Callback` function pointers when
/// they are invoked.
const VACANT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);

/// Table of registered panic callbacks; every slot starts out vacant.
static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] = [VACANT; MAX_CALLBACKS];

/// Count of registrations performed so far. It may transiently overshoot
/// `MAX_CALLBACKS`, so readers clamp it before indexing `CALLBACKS`.
static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
thread_local! {
    // Per-thread flag: set while this thread is unwinding from a panic;
    // read back by `panicking()` and saved/restored around `try`.
    static PANICKING: Cell<bool> = Cell::new(false)
}
104 #[link(name = "rustrt_native", kind = "static")]
108 /// Invoke a closure, capturing the cause of panic if one occurs.
110 /// This function will return `Ok(())` if the closure did not panic, and will
111 /// return `Err(cause)` if the closure panics. The `cause` returned is the
112 /// object with which panic was originally invoked.
114 /// This function also is unsafe for a variety of reasons:
116 /// * This is not safe to call in a nested fashion. The unwinding
117 /// interface for Rust is designed to have at most one try/catch block per
118 /// thread, not multiple. No runtime checking is currently performed to uphold
119 /// this invariant, so this function is not safe. A nested try/catch block
120 /// may result in corruption of the outer try/catch block's state, especially
121 /// if this is used within a thread itself.
123 /// * It is not sound to trigger unwinding while already unwinding. Rust threads
124 /// have runtime checks in place to ensure this invariant, but it is not
125 /// guaranteed that a rust thread is in place when invoking this function.
126 /// Unwinding twice can lead to resource leaks where some destructors are not
// NOTE(review): this excerpt elides several source lines in this function
// (the embedded original line numbers jump); each gap is flagged below.
128 pub unsafe fn try
<F
: FnOnce()>(f
: F
) -> Result
<(), Box
<Any
+ Send
>> {
// Hand the closure to the non-generic core as a type-erased raw pointer.
// NOTE(review): `try_fn::<F>` reads a `*mut Option<F>`, but `f` here still
// has type `F` — a `let mut f = Some(f);` line appears to be elided from
// this excerpt (original line 129 is missing).
130 return inner_try(try_fn
::<F
>, &mut f
as *mut _
as *mut c_void
);
132 // If an inner function were not used here, then this generic function `try`
133 // uses the native symbol `rust_try`, for which the code is statically
134 // linked into the standard library. This means that the DLL for the
135 // standard library must have `rust_try` as an exposed symbol that
136 // downstream crates can link against (because monomorphizations of `try` in
137 // downstream crates will have a reference to the `rust_try` symbol).
139 // On MSVC this requires the symbol `rust_try` to be tagged with
140 // `dllexport`, but it's easier to not have conditional `src/rt/rust_try.ll`
141 // files and instead just have this non-generic shim the compiler can take
142 // care of exposing correctly.
// Non-generic core: clears this thread's panicking flag around the foreign
// `rust_try` call and converts the returned exception pointer to a Result.
143 unsafe fn inner_try(f
: extern fn(*mut c_void
), data
: *mut c_void
)
144 -> Result
<(), Box
<Any
+ Send
>> {
// Save the caller's panicking flag and clear it for the duration of the call.
145 let prev
= PANICKING
.with(|s
| s
.get());
146 PANICKING
.with(|s
| s
.set(false));
// `ep` is null when `f` returned normally, otherwise it points at the caught
// exception object (see the comments on the `rust_try` declaration below).
147 let ep
= rust_try(f
, data
);
// Restore the caller's panicking state before returning.
148 PANICKING
.with(|s
| s
.set(prev
));
// Convert the caught exception object into the `Err` payload.
// NOTE(review): the null-check branch (`if ep.is_null() { Ok(()) } else
// { ... }`, original lines 149-151) appears to be elided from this excerpt.
152 Err(imp
::cleanup(ep
))
// C-ABI shim monomorphized per closure type: recovers the closure from the
// raw pointer and invokes it at most once (via `Option::take`).
156 extern fn try_fn
<F
: FnOnce()>(opt_closure
: *mut c_void
) {
157 let opt_closure
= opt_closure
as *mut Option
<F
>;
158 unsafe { (*opt_closure).take().unwrap()(); }
// Foreign declaration of the native try/catch entry point.
// NOTE(review): the enclosing `extern { ... }` block delimiters appear to be
// elided from this excerpt.
163 // When f(...) returns normally, the return value is null.
164 // When f(...) throws, the return value is a pointer to the caught
166 fn rust_try(f
: extern fn(*mut c_void
),
167 data
: *mut c_void
) -> *mut c_void
;
171 /// Determines whether the current thread is unwinding because of panic.
172 pub fn panicking() -> bool
{
173 PANICKING
.with(|s
| s
.get())
176 // An uninlined, unmangled function upon which to slap yer breakpoints
// NOTE(review): the attributes that enforce that (presumably
// `#[inline(never)]` / `#[no_mangle]`, original lines 177-178) and the
// function body that actually hands `cause` to the unwinder appear to be
// elided from this excerpt; only the entry trace below is visible.
179 #[allow(private_no_mangle_fns)]
180 fn rust_panic(cause
: Box
<Any
+ Send
+ '
static>) -> ! {
// Runtime debug trace emitted on entry.
181 rtdebug
!("begin_unwind()");
188 /// Entry point of panic from the libcore crate.
189 #[lang = "panic_fmt"]
190 pub extern fn rust_begin_unwind(msg
: fmt
::Arguments
,
191 file
: &'
static str, line
: u32) -> ! {
192 begin_unwind_fmt(msg
, &(file
, line
))
195 /// The entry point for unwinding with a formatted message.
197 /// This is designed to reduce the amount of code required at the call
198 /// site as much as possible (so that `panic!()` has as low an impact
199 /// on (e.g.) the inlining of other functions as possible), by moving
200 /// the actual formatting into this shared place.
201 #[inline(never)] #[cold]
202 pub fn begin_unwind_fmt(msg
: fmt
::Arguments
, file_line
: &(&'
static str, u32)) -> ! {
205 // We do two allocations here, unfortunately. But (a) they're
206 // required with the current scheme, and (b) we don't handle
207 // panic + OOM properly anyway (see comment in begin_unwind
// First allocation: render the format arguments into an owned String.
// NOTE(review): `s.write_fmt(..)` requires the `fmt::Write` trait in scope —
// a `use fmt::Write;` line appears to be elided from this excerpt.
210 let mut s
= String
::new();
// Any formatting error is deliberately ignored; we are already panicking.
211 let _
= s
.write_fmt(msg
);
// Second allocation: box the rendered message and enter the non-generic core.
212 begin_unwind_inner(Box
::new(s
), file_line
)
215 /// This is the entry point of unwinding for panic!() and assert!().
216 #[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
217 pub fn begin_unwind
<M
: Any
+ Send
>(msg
: M
, file_line
: &(&'
static str, u32)) -> ! {
218 // Note that this should be the only allocation performed in this code path.
219 // Currently this means that panic!() on OOM will invoke this code path,
220 // but then again we're not really ready for panic on OOM anyway. If
221 // we do start doing this, then we should propagate this allocation to
222 // be performed in the parent of this thread instead of the thread that's
225 // see below for why we do the `Any` coercion here.
226 begin_unwind_inner(Box
::new(msg
), file_line
)
229 /// The core of the unwinding.
231 /// This is non-generic to avoid instantiation bloat in other crates
232 /// (which makes compilation of small crates noticeably slower). (Note:
233 /// we need the `Any` object anyway, we're not just creating it to
234 /// avoid being generic.)
236 /// Doing this split took the LLVM IR line counts of `fn main() { panic!()
237 /// }` from ~1900/3700 (-O/no opts) to 180/590.
238 #[inline(never)] #[cold] // this is the slow path, please never inline this
239 fn begin_unwind_inner(msg
: Box
<Any
+ Send
>,
240 file_line
: &(&'
static str, u32)) -> ! {
241 // Make sure the default failure handler is registered before we look at the
242 // callbacks. We also use a raw sys-based mutex here instead of a
243 // `std::sync` one as accessing TLS can cause weird recursive problems (and
244 // we don't need poison checking).
246 static LOCK
: Mutex
= Mutex
::new();
247 static mut INIT
: bool
= false;
// One-time registration of the default panic handler.
// NOTE(review): the surrounding `LOCK.lock()` / `if !INIT` guard that makes
// this run only once (original lines 248-255) appears to be elided.
250 register(panicking
::on_panic
);
256 // First, invoke call the user-defined callbacks triggered on thread panic.
258 // By the time that we see a callback has been registered (by reading
259 // MAX_CALLBACKS), the actual callback itself may have not been stored yet,
260 // so we just chalk it up to a race condition and move on to the next
261 // callback. Additionally, CALLBACK_CNT may briefly be higher than
262 // MAX_CALLBACKS, so we're sure to clamp it as necessary.
// Snapshot of how many callbacks have been registered so far.
264 let amt
= CALLBACK_CNT
.load(Ordering
::SeqCst
);
// Clamp the count before slicing the fixed-size table.
// NOTE(review): the `let callbacks = { ... };` binding this slice expression
// belongs to (original lines 265-266) appears to be elided.
265 &CALLBACKS
[..cmp
::min(amt
, MAX_CALLBACKS
)]
267 for cb
in callbacks
{
268 match cb
.load(Ordering
::SeqCst
) {
// A non-zero slot holds a `Callback` stored as a usize by `register()`;
// turn it back into a function pointer and invoke it with the payload and
// panic location. NOTE(review): the `0 => {}` arm and the `n => {` arm
// opener (original lines 269-270) appear to be elided.
271 let f
: Callback
= unsafe { mem::transmute(n) }
;
272 let (file
, line
) = *file_line
;
273 f(&*msg
, file
, line
);
278 // Now that we've run all the necessary unwind callbacks, we actually
279 // perform the unwinding.
281 // If a thread panics while it's already unwinding then we
282 // have limited options. Currently our preference is to
283 // just abort. In the future we may consider resuming
284 // unwinding or otherwise exiting the thread cleanly.
// Double-panic path: abort the process outright.
// NOTE(review): the `if panicking()` guard that selects this path (original
// line 280) appears to be elided.
285 rterrln
!("thread panicked while panicking. aborting.");
286 unsafe { intrinsics::abort() }
// Mark this thread as panicking (read back by `panicking()`) before the
// actual unwind starts. NOTE(review): the concluding `rust_panic(msg)` call
// and closing brace (original lines 289-291) appear to be elided.
288 PANICKING
.with(|s
| s
.set(true));
292 /// Register a callback to be invoked when a thread unwinds.
294 /// This is an unsafe and experimental API which allows for an arbitrary
295 /// callback to be invoked when a thread panics. This callback is invoked on both
296 /// the initial unwinding and a double unwinding if one occurs. Additionally,
297 /// the local `Thread` will be in place for the duration of the callback, and
298 /// the callback must ensure that it remains in place once the callback returns.
300 /// Only a limited number of callbacks can be registered, and this function
301 /// returns whether the callback was successfully registered or not. It is not
302 /// currently possible to unregister a callback once it has been registered.
303 pub unsafe fn register(f
: Callback
) -> bool
{
// Reserve a slot index by bumping the count; the index decides which arm
// we take below.
304 match CALLBACK_CNT
.fetch_add(1, Ordering
::SeqCst
) {
305 // The invocation code has knowledge of this window where the count has
306 // been incremented, but the callback has not been stored. We're
307 // guaranteed that the slot we're storing into is 0.
308 n
if n
< MAX_CALLBACKS
=> {
// Publish the callback into our reserved slot, storing the function
// pointer as a usize (it is transmuted back in begin_unwind_inner).
309 let prev
= CALLBACKS
[n
].swap(mem
::transmute(f
), Ordering
::SeqCst
);
310 rtassert
!(prev
== 0);
// NOTE(review): the `true` result of this arm and the overflow arm's
// `false` result (original lines 311-312 and 316-319) appear to be elided
// from this excerpt.
313 // If we accidentally bumped the count too high, pull it back.
315 CALLBACK_CNT
.store(MAX_CALLBACKS
, Ordering
::SeqCst
);