// src/libcore/intrinsics.rs — extracted from rustc 1.13.0 sources
// (git-blame annotations from the original web view have been removed).
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc_trans/intrinsic.rs.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: http://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//!   take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//!   take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//!   guaranteed to happen in order. This is the standard mode for working
//!   with atomic types and is equivalent to Java's `volatile`.
42#![unstable(feature = "core_intrinsics",
43 reason = "intrinsics are unlikely to ever be stabilized, instead \
44 they should be used through stabilized interfaces \
e9174d1e
SL
45 in the rest of the standard library",
46 issue = "0")]
1a4d82fc
JJ
47#![allow(missing_docs)]
48
1a4d82fc
JJ
49extern "rust-intrinsic" {
50
62682a34 51 // NB: These intrinsics take raw pointers because they mutate aliased
1a4d82fc
JJ
52 // memory, which is not valid for either `&` or `&mut`.
53
54a0048b 54 pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 55 pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 56 pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 57 pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 58 pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 59 pub fn atomic_cxchg_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 60 pub fn atomic_cxchg_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b 61 pub fn atomic_cxchg_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
54a0048b
SL
62 pub fn atomic_cxchg_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
63
7453a54e 64 pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 65 pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 66 pub fn atomic_cxchgweak_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 67 pub fn atomic_cxchgweak_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 68 pub fn atomic_cxchgweak_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 69 pub fn atomic_cxchgweak_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 70 pub fn atomic_cxchgweak_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 71 pub fn atomic_cxchgweak_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
7453a54e 72 pub fn atomic_cxchgweak_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
1a4d82fc
JJ
73
74 pub fn atomic_load<T>(src: *const T) -> T;
75 pub fn atomic_load_acq<T>(src: *const T) -> T;
76 pub fn atomic_load_relaxed<T>(src: *const T) -> T;
77 pub fn atomic_load_unordered<T>(src: *const T) -> T;
78
79 pub fn atomic_store<T>(dst: *mut T, val: T);
80 pub fn atomic_store_rel<T>(dst: *mut T, val: T);
81 pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);
82 pub fn atomic_store_unordered<T>(dst: *mut T, val: T);
83
84 pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
85 pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
86 pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
87 pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
88 pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;
89
90 pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
91 pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
92 pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
93 pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
94 pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;
95
96 pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
97 pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
98 pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
99 pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
100 pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;
101
102 pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
103 pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
104 pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
105 pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
106 pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;
107
108 pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
109 pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
110 pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
111 pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
112 pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;
113
114 pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
115 pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
116 pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
117 pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
118 pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;
119
120 pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
121 pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
122 pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
123 pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
124 pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;
125
126 pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
127 pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
128 pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
129 pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
130 pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;
131
132 pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
133 pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
134 pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
135 pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
136 pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;
137
138 pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
139 pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
140 pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
141 pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
142 pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;
143
144 pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
145 pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
146 pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
147 pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
148 pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
149}
150
151extern "rust-intrinsic" {
152
153 pub fn atomic_fence();
154 pub fn atomic_fence_acq();
155 pub fn atomic_fence_rel();
156 pub fn atomic_fence_acqrel();
157
d9579d0f
AL
158 /// A compiler-only memory barrier.
159 ///
62682a34
SL
160 /// Memory accesses will never be reordered across this barrier by the
161 /// compiler, but no instructions will be emitted for it. This is
162 /// appropriate for operations on the same thread that may be preempted,
163 /// such as when interacting with signal handlers.
d9579d0f 164 pub fn atomic_singlethreadfence();
d9579d0f 165 pub fn atomic_singlethreadfence_acq();
d9579d0f 166 pub fn atomic_singlethreadfence_rel();
d9579d0f
AL
167 pub fn atomic_singlethreadfence_acqrel();
168
3157f602
XL
169 /// Magic intrinsic that derives its meaning from attributes
170 /// attached to the function.
171 ///
172 /// For example, dataflow uses this to inject static assertions so
173 /// that `rustc_peek(potentially_uninitialized)` would actually
174 /// double-check that dataflow did indeed compute that it is
175 /// uninitialized at that point in the control flow.
3157f602
XL
176 pub fn rustc_peek<T>(_: T) -> T;
177
9346a6ac 178 /// Aborts the execution of the process.
1a4d82fc
JJ
179 pub fn abort() -> !;
180
9346a6ac 181 /// Tells LLVM that this point in the code is not reachable,
1a4d82fc
JJ
182 /// enabling further optimizations.
183 ///
184 /// NB: This is very different from the `unreachable!()` macro!
185 pub fn unreachable() -> !;
186
9346a6ac 187 /// Informs the optimizer that a condition is always true.
1a4d82fc
JJ
188 /// If the condition is false, the behavior is undefined.
189 ///
190 /// No code is generated for this intrinsic, but the optimizer will try
191 /// to preserve it (and its condition) between passes, which may interfere
192 /// with optimization of surrounding code and reduce performance. It should
193 /// not be used if the invariant can be discovered by the optimizer on its
194 /// own, or if it does not enable any significant optimizations.
195 pub fn assume(b: bool);
196
9e0c209e
SL
197 #[cfg(not(stage0))]
198 /// Hints to the compiler that branch condition is likely to be true.
199 /// Returns the value passed to it.
200 ///
201 /// Any use other than with `if` statements will probably not have an effect.
202 pub fn likely(b: bool) -> bool;
203
204 #[cfg(not(stage0))]
205 /// Hints to the compiler that branch condition is likely to be false.
206 /// Returns the value passed to it.
207 ///
208 /// Any use other than with `if` statements will probably not have an effect.
209 pub fn unlikely(b: bool) -> bool;
210
9346a6ac 211 /// Executes a breakpoint trap, for inspection by a debugger.
1a4d82fc
JJ
212 pub fn breakpoint();
213
214 /// The size of a type in bytes.
215 ///
a7813a04
XL
216 /// More specifically, this is the offset in bytes between successive
217 /// items of the same type, including alignment padding.
85aaf69f 218 pub fn size_of<T>() -> usize;
1a4d82fc 219
9346a6ac 220 /// Moves a value to an uninitialized memory location.
1a4d82fc
JJ
221 ///
222 /// Drop glue is not run on the destination.
c1a9b12d 223 pub fn move_val_init<T>(dst: *mut T, src: T);
1a4d82fc 224
85aaf69f
SL
225 pub fn min_align_of<T>() -> usize;
226 pub fn pref_align_of<T>() -> usize;
1a4d82fc 227
d9579d0f 228 pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
d9579d0f 229 pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;
92a42be0
SL
230
231 /// Executes the destructor (if any) of the pointed-to value.
232 ///
233 /// This has two use cases:
234 ///
235 /// * It is *required* to use `drop_in_place` to drop unsized types like
236 /// trait objects, because they can't be read out onto the stack and
237 /// dropped normally.
238 ///
239 /// * It is friendlier to the optimizer to do this over `ptr::read` when
240 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
241 /// as the compiler doesn't need to prove that it's sound to elide the
242 /// copy.
243 ///
244 /// # Undefined Behavior
245 ///
246 /// This has all the same safety problems as `ptr::read` with respect to
247 /// invalid pointers, types, and double drops.
7453a54e 248 #[stable(feature = "drop_in_place", since = "1.8.0")]
92a42be0 249 pub fn drop_in_place<T: ?Sized>(to_drop: *mut T);
d9579d0f 250
c34b1796
AL
251 /// Gets a static string slice containing the name of a type.
252 pub fn type_name<T: ?Sized>() -> &'static str;
1a4d82fc
JJ
253
254 /// Gets an identifier which is globally unique to the specified type. This
255 /// function will return the same value for a type regardless of whichever
256 /// crate it is invoked in.
85aaf69f 257 pub fn type_id<T: ?Sized + 'static>() -> u64;
1a4d82fc 258
9346a6ac 259 /// Creates a value initialized to zero.
1a4d82fc
JJ
260 ///
261 /// `init` is unsafe because it returns a zeroed-out datum,
c34b1796
AL
262 /// which is unsafe unless T is `Copy`. Also, even if T is
263 /// `Copy`, an all-zero value may not correspond to any legitimate
264 /// state for the type in question.
1a4d82fc
JJ
265 pub fn init<T>() -> T;
266
9346a6ac 267 /// Creates an uninitialized value.
c34b1796
AL
268 ///
269 /// `uninit` is unsafe because there is no guarantee of what its
270 /// contents are. In particular its drop-flag may be set to any
271 /// state, which means it may claim either dropped or
272 /// undropped. In the general case one must use `ptr::write` to
273 /// initialize memory previous set to the result of `uninit`.
1a4d82fc
JJ
274 pub fn uninit<T>() -> T;
275
9346a6ac 276 /// Moves a value out of scope without running drop glue.
1a4d82fc
JJ
277 pub fn forget<T>(_: T) -> ();
278
9e0c209e
SL
279 /// Reinterprets the bits of a value of one type as another type.
280 ///
281 /// Both types must have the same size. Neither the original, nor the result,
282 /// may be an [invalid value](../../nomicon/meet-safe-and-unsafe.html).
1a4d82fc 283 ///
5bcae85e 284 /// `transmute` is semantically equivalent to a bitwise move of one type
9e0c209e
SL
285 /// into another. It copies the bits from the source value into the
286 /// destination value, then forgets the original. It's equivalent to C's
287 /// `memcpy` under the hood, just like `transmute_copy`.
5bcae85e 288 ///
9e0c209e
SL
289 /// `transmute` is **incredibly** unsafe. There are a vast number of ways to
290 /// cause [undefined behavior][ub] with this function. `transmute` should be
5bcae85e
SL
291 /// the absolute last resort.
292 ///
293 /// The [nomicon](../../nomicon/transmutes.html) has additional
294 /// documentation.
1a4d82fc 295 ///
9e0c209e
SL
296 /// [ub]: ../../reference.html#behavior-considered-undefined
297 ///
1a4d82fc
JJ
298 /// # Examples
299 ///
5bcae85e
SL
300 /// There are a few things that `transmute` is really useful for.
301 ///
302 /// Getting the bitpattern of a floating point type (or, more generally,
303 /// type punning, when `T` and `U` aren't pointers):
304 ///
305 /// ```
306 /// let bitpattern = unsafe {
307 /// std::mem::transmute::<f32, u32>(1.0)
308 /// };
309 /// assert_eq!(bitpattern, 0x3F800000);
310 /// ```
311 ///
9e0c209e
SL
312 /// Turning a pointer into a function pointer. This is *not* portable to
313 /// machines where function pointers and data pointers have different sizes.
5bcae85e
SL
314 ///
315 /// ```
316 /// fn foo() -> i32 {
317 /// 0
318 /// }
319 /// let pointer = foo as *const ();
320 /// let function = unsafe {
321 /// std::mem::transmute::<*const (), fn() -> i32>(pointer)
322 /// };
323 /// assert_eq!(function(), 0);
324 /// ```
325 ///
9e0c209e
SL
326 /// Extending a lifetime, or shortening an invariant lifetime. This is
327 /// advanced, very unsafe Rust!
5bcae85e
SL
328 ///
329 /// ```
330 /// struct R<'a>(&'a i32);
331 /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
332 /// std::mem::transmute::<R<'b>, R<'static>>(r)
333 /// }
334 ///
335 /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
336 /// -> &'b mut R<'c> {
337 /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
338 /// }
339 /// ```
340 ///
341 /// # Alternatives
342 ///
9e0c209e
SL
343 /// Don't despair: many uses of `transmute` can be achieved through other means.
344 /// Below are common applications of `transmute` which can be replaced with safer
345 /// constructs.
5bcae85e
SL
346 ///
347 /// Turning a pointer into a `usize`:
348 ///
349 /// ```
350 /// let ptr = &0;
351 /// let ptr_num_transmute = unsafe {
352 /// std::mem::transmute::<&i32, usize>(ptr)
353 /// };
9e0c209e 354 ///
5bcae85e
SL
355 /// // Use an `as` cast instead
356 /// let ptr_num_cast = ptr as *const i32 as usize;
357 /// ```
358 ///
359 /// Turning a `*mut T` into an `&mut T`:
360 ///
361 /// ```
362 /// let ptr: *mut i32 = &mut 0;
363 /// let ref_transmuted = unsafe {
364 /// std::mem::transmute::<*mut i32, &mut i32>(ptr)
365 /// };
9e0c209e 366 ///
5bcae85e
SL
367 /// // Use a reborrow instead
368 /// let ref_casted = unsafe { &mut *ptr };
369 /// ```
370 ///
371 /// Turning an `&mut T` into an `&mut U`:
372 ///
373 /// ```
374 /// let ptr = &mut 0;
375 /// let val_transmuted = unsafe {
376 /// std::mem::transmute::<&mut i32, &mut u32>(ptr)
377 /// };
9e0c209e 378 ///
5bcae85e
SL
379 /// // Now, put together `as` and reborrowing - note the chaining of `as`
380 /// // `as` is not transitive
381 /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
382 /// ```
383 ///
384 /// Turning an `&str` into an `&[u8]`:
385 ///
386 /// ```
387 /// // this is not a good way to do this.
388 /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
389 /// assert_eq!(slice, &[82, 117, 115, 116]);
9e0c209e 390 ///
5bcae85e
SL
391 /// // You could use `str::as_bytes`
392 /// let slice = "Rust".as_bytes();
393 /// assert_eq!(slice, &[82, 117, 115, 116]);
9e0c209e 394 ///
5bcae85e
SL
395 /// // Or, just use a byte string, if you have control over the string
396 /// // literal
397 /// assert_eq!(b"Rust", &[82, 117, 115, 116]);
398 /// ```
399 ///
400 /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`:
401 ///
85aaf69f 402 /// ```
5bcae85e
SL
403 /// let store = [0, 1, 2, 3];
404 /// let mut v_orig = store.iter().collect::<Vec<&i32>>();
9e0c209e 405 ///
5bcae85e
SL
406 /// // Using transmute: this is Undefined Behavior, and a bad idea.
407 /// // However, it is no-copy.
408 /// let v_transmuted = unsafe {
409 /// std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(
410 /// v_orig.clone())
411 /// };
9e0c209e 412 ///
5bcae85e 413 /// // This is the suggested, safe way.
9e0c209e 414 /// // It does copy the entire vector, though, into a new array.
5bcae85e
SL
415 /// let v_collected = v_orig.clone()
416 /// .into_iter()
417 /// .map(|r| Some(r))
418 /// .collect::<Vec<Option<&i32>>>();
9e0c209e 419 ///
5bcae85e
SL
420 /// // The no-copy, unsafe way, still using transmute, but not UB.
421 /// // This is equivalent to the original, but safer, and reuses the
422 /// // same Vec internals. Therefore the new inner type must have the
423 /// // exact same size, and the same or lesser alignment, as the old
424 /// // type. The same caveats exist for this method as transmute, for
425 /// // the original inner type (`&i32`) to the converted inner type
426 /// // (`Option<&i32>`), so read the nomicon pages linked above.
427 /// let v_from_raw = unsafe {
428 /// Vec::from_raw_parts(v_orig.as_mut_ptr(),
429 /// v_orig.len(),
430 /// v_orig.capacity())
431 /// };
432 /// std::mem::forget(v_orig);
433 /// ```
434 ///
435 /// Implementing `split_at_mut`:
1a4d82fc 436 ///
5bcae85e
SL
437 /// ```
438 /// use std::{slice, mem};
9e0c209e 439 ///
5bcae85e
SL
440 /// // There are multiple ways to do this; and there are multiple problems
441 /// // with the following, transmute, way.
442 /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
443 /// -> (&mut [T], &mut [T]) {
444 /// let len = slice.len();
445 /// assert!(mid <= len);
446 /// unsafe {
447 /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
448 /// // first: transmute is not typesafe; all it checks is that T and
449 /// // U are of the same size. Second, right here, you have two
450 /// // mutable references pointing to the same memory.
451 /// (&mut slice[0..mid], &mut slice2[mid..len])
452 /// }
453 /// }
9e0c209e 454 ///
5bcae85e
SL
455 /// // This gets rid of the typesafety problems; `&mut *` will *only* give
456 /// // you an `&mut T` from an `&mut T` or `*mut T`.
457 /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
458 /// -> (&mut [T], &mut [T]) {
459 /// let len = slice.len();
460 /// assert!(mid <= len);
461 /// unsafe {
462 /// let slice2 = &mut *(slice as *mut [T]);
463 /// // however, you still have two mutable references pointing to
464 /// // the same memory.
465 /// (&mut slice[0..mid], &mut slice2[mid..len])
466 /// }
467 /// }
9e0c209e 468 ///
5bcae85e
SL
469 /// // This is how the standard library does it. This is the best method, if
470 /// // you need to do something like this
471 /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
472 /// -> (&mut [T], &mut [T]) {
473 /// let len = slice.len();
474 /// assert!(mid <= len);
475 /// unsafe {
476 /// let ptr = slice.as_mut_ptr();
477 /// // This now has three mutable references pointing at the same
478 /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
479 /// // `slice` is never used after `let ptr = ...`, and so one can
480 /// // treat it as "dead", and therefore, you only have two real
481 /// // mutable slices.
482 /// (slice::from_raw_parts_mut(ptr, mid),
483 /// slice::from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
484 /// }
485 /// }
1a4d82fc 486 /// ```
85aaf69f 487 #[stable(feature = "rust1", since = "1.0.0")]
e9174d1e 488 pub fn transmute<T, U>(e: T) -> U;
1a4d82fc 489
c34b1796
AL
490 /// Returns `true` if the actual type given as `T` requires drop
491 /// glue; returns `false` if the actual type provided for `T`
492 /// implements `Copy`.
493 ///
494 /// If the actual type neither requires drop glue nor implements
495 /// `Copy`, then may return `true` or `false`.
1a4d82fc
JJ
496 pub fn needs_drop<T>() -> bool;
497
d9579d0f 498 /// Calculates the offset from a pointer.
1a4d82fc
JJ
499 ///
500 /// This is implemented as an intrinsic to avoid converting to and from an
501 /// integer, since the conversion would throw away aliasing information.
d9579d0f
AL
502 ///
503 /// # Safety
504 ///
505 /// Both the starting and resulting pointer must be either in bounds or one
506 /// byte past the end of an allocated object. If either pointer is out of
507 /// bounds or arithmetic overflow occurs then any further use of the
508 /// returned value will result in undefined behavior.
85aaf69f 509 pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
1a4d82fc 510
62682a34
SL
511 /// Calculates the offset from a pointer, potentially wrapping.
512 ///
513 /// This is implemented as an intrinsic to avoid converting to and from an
514 /// integer, since the conversion inhibits certain optimizations.
515 ///
516 /// # Safety
517 ///
518 /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
519 /// resulting pointer to point into or one byte past the end of an allocated
520 /// object, and it wraps with two's complement arithmetic. The resulting
521 /// value is not necessarily valid to be used to actually access memory.
522 pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
523
1a4d82fc
JJ
524 /// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
525 /// and destination may *not* overlap.
526 ///
c34b1796 527 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1a4d82fc
JJ
528 ///
529 /// # Safety
530 ///
85aaf69f 531 /// Beyond requiring that the program must be allowed to access both regions
b039eaaf 532 /// of memory, it is Undefined Behavior for source and destination to
85aaf69f
SL
533 /// overlap. Care must also be taken with the ownership of `src` and
534 /// `dst`. This method semantically moves the values of `src` into `dst`.
535 /// However it does not drop the contents of `dst`, or prevent the contents
536 /// of `src` from being dropped or used.
1a4d82fc
JJ
537 ///
538 /// # Examples
539 ///
540 /// A safe swap function:
541 ///
542 /// ```
543 /// use std::mem;
544 /// use std::ptr;
545 ///
92a42be0 546 /// # #[allow(dead_code)]
1a4d82fc
JJ
547 /// fn swap<T>(x: &mut T, y: &mut T) {
548 /// unsafe {
549 /// // Give ourselves some scratch space to work with
550 /// let mut t: T = mem::uninitialized();
551 ///
552 /// // Perform the swap, `&mut` pointers never alias
c34b1796
AL
553 /// ptr::copy_nonoverlapping(x, &mut t, 1);
554 /// ptr::copy_nonoverlapping(y, x, 1);
555 /// ptr::copy_nonoverlapping(&t, y, 1);
1a4d82fc
JJ
556 ///
557 /// // y and t now point to the same thing, but we need to completely forget `tmp`
558 /// // because it's no longer relevant.
559 /// mem::forget(t);
560 /// }
561 /// }
562 /// ```
c34b1796 563 #[stable(feature = "rust1", since = "1.0.0")]
c34b1796
AL
564 pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
565
1a4d82fc
JJ
566 /// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
567 /// and destination may overlap.
568 ///
c34b1796 569 /// `copy` is semantically equivalent to C's `memmove`.
1a4d82fc
JJ
570 ///
571 /// # Safety
572 ///
573 /// Care must be taken with the ownership of `src` and `dst`.
574 /// This method semantically moves the values of `src` into `dst`.
575 /// However it does not drop the contents of `dst`, or prevent the contents of `src`
576 /// from being dropped or used.
577 ///
578 /// # Examples
579 ///
580 /// Efficiently create a Rust vector from an unsafe buffer:
581 ///
582 /// ```
583 /// use std::ptr;
584 ///
92a42be0 585 /// # #[allow(dead_code)]
c34b1796 586 /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
1a4d82fc
JJ
587 /// let mut dst = Vec::with_capacity(elts);
588 /// dst.set_len(elts);
c34b1796 589 /// ptr::copy(ptr, dst.as_mut_ptr(), elts);
1a4d82fc
JJ
590 /// dst
591 /// }
592 /// ```
593 ///
c34b1796 594 #[stable(feature = "rust1", since = "1.0.0")]
c34b1796
AL
595 pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
596
1a4d82fc 597 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
92a42be0 598 /// bytes of memory starting at `dst` to `val`.
c34b1796
AL
599 #[stable(feature = "rust1", since = "1.0.0")]
600 pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
1a4d82fc
JJ
601
602 /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
603 /// a size of `count` * `size_of::<T>()` and an alignment of
604 /// `min_align_of::<T>()`
605 ///
b039eaaf 606 /// The volatile parameter is set to `true`, so it will not be optimized out.
1a4d82fc 607 pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
85aaf69f 608 count: usize);
1a4d82fc
JJ
609 /// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
610 /// a size of `count` * `size_of::<T>()` and an alignment of
611 /// `min_align_of::<T>()`
612 ///
b039eaaf 613 /// The volatile parameter is set to `true`, so it will not be optimized out.
85aaf69f 614 pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
1a4d82fc
JJ
615 /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
616 /// size of `count` * `size_of::<T>()` and an alignment of
617 /// `min_align_of::<T>()`.
618 ///
b039eaaf 619 /// The volatile parameter is set to `true`, so it will not be optimized out.
85aaf69f 620 pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
1a4d82fc
JJ
621
622 /// Perform a volatile load from the `src` pointer.
623 pub fn volatile_load<T>(src: *const T) -> T;
624 /// Perform a volatile store to the `dst` pointer.
625 pub fn volatile_store<T>(dst: *mut T, val: T);
626
627 /// Returns the square root of an `f32`
628 pub fn sqrtf32(x: f32) -> f32;
629 /// Returns the square root of an `f64`
630 pub fn sqrtf64(x: f64) -> f64;
631
632 /// Raises an `f32` to an integer power.
633 pub fn powif32(a: f32, x: i32) -> f32;
634 /// Raises an `f64` to an integer power.
635 pub fn powif64(a: f64, x: i32) -> f64;
636
637 /// Returns the sine of an `f32`.
638 pub fn sinf32(x: f32) -> f32;
639 /// Returns the sine of an `f64`.
640 pub fn sinf64(x: f64) -> f64;
641
642 /// Returns the cosine of an `f32`.
643 pub fn cosf32(x: f32) -> f32;
644 /// Returns the cosine of an `f64`.
645 pub fn cosf64(x: f64) -> f64;
646
647 /// Raises an `f32` to an `f32` power.
648 pub fn powf32(a: f32, x: f32) -> f32;
649 /// Raises an `f64` to an `f64` power.
650 pub fn powf64(a: f64, x: f64) -> f64;
651
652 /// Returns the exponential of an `f32`.
653 pub fn expf32(x: f32) -> f32;
654 /// Returns the exponential of an `f64`.
655 pub fn expf64(x: f64) -> f64;
656
657 /// Returns 2 raised to the power of an `f32`.
658 pub fn exp2f32(x: f32) -> f32;
659 /// Returns 2 raised to the power of an `f64`.
660 pub fn exp2f64(x: f64) -> f64;
661
662 /// Returns the natural logarithm of an `f32`.
663 pub fn logf32(x: f32) -> f32;
664 /// Returns the natural logarithm of an `f64`.
665 pub fn logf64(x: f64) -> f64;
666
667 /// Returns the base 10 logarithm of an `f32`.
668 pub fn log10f32(x: f32) -> f32;
669 /// Returns the base 10 logarithm of an `f64`.
670 pub fn log10f64(x: f64) -> f64;
671
672 /// Returns the base 2 logarithm of an `f32`.
673 pub fn log2f32(x: f32) -> f32;
674 /// Returns the base 2 logarithm of an `f64`.
675 pub fn log2f64(x: f64) -> f64;
676
677 /// Returns `a * b + c` for `f32` values.
678 pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
679 /// Returns `a * b + c` for `f64` values.
680 pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
681
682 /// Returns the absolute value of an `f32`.
683 pub fn fabsf32(x: f32) -> f32;
684 /// Returns the absolute value of an `f64`.
685 pub fn fabsf64(x: f64) -> f64;
686
687 /// Copies the sign from `y` to `x` for `f32` values.
688 pub fn copysignf32(x: f32, y: f32) -> f32;
689 /// Copies the sign from `y` to `x` for `f64` values.
690 pub fn copysignf64(x: f64, y: f64) -> f64;
691
692 /// Returns the largest integer less than or equal to an `f32`.
693 pub fn floorf32(x: f32) -> f32;
694 /// Returns the largest integer less than or equal to an `f64`.
695 pub fn floorf64(x: f64) -> f64;
696
697 /// Returns the smallest integer greater than or equal to an `f32`.
698 pub fn ceilf32(x: f32) -> f32;
699 /// Returns the smallest integer greater than or equal to an `f64`.
700 pub fn ceilf64(x: f64) -> f64;
701
702 /// Returns the integer part of an `f32`.
703 pub fn truncf32(x: f32) -> f32;
704 /// Returns the integer part of an `f64`.
705 pub fn truncf64(x: f64) -> f64;
706
707 /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
708 /// if the argument is not an integer.
709 pub fn rintf32(x: f32) -> f32;
710 /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
711 /// if the argument is not an integer.
712 pub fn rintf64(x: f64) -> f64;
713
714 /// Returns the nearest integer to an `f32`.
715 pub fn nearbyintf32(x: f32) -> f32;
716 /// Returns the nearest integer to an `f64`.
717 pub fn nearbyintf64(x: f64) -> f64;
718
719 /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
720 pub fn roundf32(x: f32) -> f32;
721 /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
722 pub fn roundf64(x: f64) -> f64;
723
54a0048b
SL
724 /// Float addition that allows optimizations based on algebraic rules.
725 /// May assume inputs are finite.
54a0048b
SL
726 pub fn fadd_fast<T>(a: T, b: T) -> T;
727
728 /// Float subtraction that allows optimizations based on algebraic rules.
729 /// May assume inputs are finite.
54a0048b
SL
730 pub fn fsub_fast<T>(a: T, b: T) -> T;
731
732 /// Float multiplication that allows optimizations based on algebraic rules.
733 /// May assume inputs are finite.
54a0048b
SL
734 pub fn fmul_fast<T>(a: T, b: T) -> T;
735
736 /// Float division that allows optimizations based on algebraic rules.
737 /// May assume inputs are finite.
54a0048b
SL
738 pub fn fdiv_fast<T>(a: T, b: T) -> T;
739
740 /// Float remainder that allows optimizations based on algebraic rules.
741 /// May assume inputs are finite.
54a0048b
SL
742 pub fn frem_fast<T>(a: T, b: T) -> T;
743
744
92a42be0 745 /// Returns the number of bits set in an integer type `T`
92a42be0 746 pub fn ctpop<T>(x: T) -> T;
1a4d82fc 747
92a42be0 748 /// Returns the number of leading bits unset in an integer type `T`
92a42be0 749 pub fn ctlz<T>(x: T) -> T;
1a4d82fc 750
92a42be0 751 /// Returns the number of trailing bits unset in an integer type `T`
92a42be0 752 pub fn cttz<T>(x: T) -> T;
1a4d82fc 753
92a42be0 754 /// Reverses the bytes in an integer type `T`.
92a42be0 755 pub fn bswap<T>(x: T) -> T;
1a4d82fc 756
92a42be0 757 /// Performs checked integer addition.
92a42be0
SL
758 pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
759
92a42be0 760 /// Performs checked integer subtraction
92a42be0
SL
761 pub fn sub_with_overflow<T>(x: T, y: T) -> (T, bool);
762
92a42be0 763 /// Performs checked integer multiplication
92a42be0
SL
764 pub fn mul_with_overflow<T>(x: T, y: T) -> (T, bool);
765
766 /// Performs an unchecked division, resulting in undefined behavior
767 /// where y = 0 or x = `T::min_value()` and y = -1
92a42be0
SL
768 pub fn unchecked_div<T>(x: T, y: T) -> T;
769 /// Returns the remainder of an unchecked division, resulting in
770 /// undefined behavior where y = 0 or x = `T::min_value()` and y = -1
92a42be0
SL
771 pub fn unchecked_rem<T>(x: T, y: T) -> T;
772
773 /// Returns (a + b) mod 2^N, where N is the width of T in bits.
c34b1796 774 pub fn overflowing_add<T>(a: T, b: T) -> T;
92a42be0 775 /// Returns (a - b) mod 2^N, where N is the width of T in bits.
c34b1796 776 pub fn overflowing_sub<T>(a: T, b: T) -> T;
92a42be0 777 /// Returns (a * b) mod 2^N, where N is the width of T in bits.
c34b1796 778 pub fn overflowing_mul<T>(a: T, b: T) -> T;
9346a6ac 779
62682a34
SL
780 /// Returns the value of the discriminant for the variant in 'v',
781 /// cast to a `u64`; if `T` has no discriminant, returns 0.
782 pub fn discriminant_value<T>(v: &T) -> u64;
c1a9b12d
SL
783
784 /// Rust's "try catch" construct which invokes the function pointer `f` with
7453a54e
SL
785 /// the data pointer `data`.
786 ///
787 /// The third pointer is a target-specific data pointer which is filled in
788 /// with the specifics of the exception that occurred. For examples on Unix
789 /// platforms this is a `*mut *mut T` which is filled in by the compiler and
790 /// on MSVC it's `*mut [usize; 2]`. For more information see the compiler's
791 /// source as well as std's catch implementation.
792 pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32;
d9579d0f 793}