// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc_trans/intrinsic.rs.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: http://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//!   take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//!   take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//!   guaranteed to happen in order. This is the standard mode for working
//!   with atomic types and is equivalent to Java's `volatile`.
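//!
//! As a rough sketch, the same orderings are exposed through the stabilized
//! `std::sync::atomic` types, which are implemented on top of these
//! intrinsics:
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! let ready = AtomicBool::new(false);
//! // Publish with Release: writes made before this store become visible
//! // to any thread that later loads the flag with Acquire.
//! ready.store(true, Ordering::Release);
//! assert!(ready.load(Ordering::Acquire));
//! ```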

#![unstable(feature = "core_intrinsics",
            reason = "intrinsics are unlikely to ever be stabilized, instead \
                      they should be used through stabilized interfaces \
                      in the rest of the standard library",
            issue = "0")]
#![allow(missing_docs)]

extern "rust-intrinsic" {

    // NB: These intrinsics take raw pointers because they mutate aliased
    // memory, which is not valid for either `&` or `&mut`.

    pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchg_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);

    pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
    pub fn atomic_cxchgweak_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);

    pub fn atomic_load<T>(src: *const T) -> T;
    pub fn atomic_load_acq<T>(src: *const T) -> T;
    pub fn atomic_load_relaxed<T>(src: *const T) -> T;
    pub fn atomic_load_unordered<T>(src: *const T) -> T;

    pub fn atomic_store<T>(dst: *mut T, val: T);
    pub fn atomic_store_rel<T>(dst: *mut T, val: T);
    pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);
    pub fn atomic_store_unordered<T>(dst: *mut T, val: T);

    pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
}

extern "rust-intrinsic" {

    pub fn atomic_fence();
    pub fn atomic_fence_acq();
    pub fn atomic_fence_rel();
    pub fn atomic_fence_acqrel();

    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the
    /// compiler, but no instructions will be emitted for it. This is
    /// appropriate for operations on the same thread that may be preempted,
    /// such as when interacting with signal handlers.
    pub fn atomic_singlethreadfence();
    pub fn atomic_singlethreadfence_acq();
    pub fn atomic_singlethreadfence_rel();
    pub fn atomic_singlethreadfence_acqrel();

    /// Magic intrinsic that derives its meaning from attributes
    /// attached to the function.
    ///
    /// For example, dataflow uses this to inject static assertions so
    /// that `rustc_peek(potentially_uninitialized)` would actually
    /// double-check that dataflow did indeed compute that it is
    /// uninitialized at that point in the control flow.
    pub fn rustc_peek<T>(_: T) -> T;

    /// Aborts the execution of the process.
    pub fn abort() -> !;

    /// Tells LLVM that this point in the code is not reachable,
    /// enabling further optimizations.
    ///
    /// NB: This is very different from the `unreachable!()` macro!
    pub fn unreachable() -> !;

    /// Informs the optimizer that a condition is always true.
    /// If the condition is false, the behavior is undefined.
    ///
    /// No code is generated for this intrinsic, but the optimizer will try
    /// to preserve it (and its condition) between passes, which may interfere
    /// with optimization of surrounding code and reduce performance. It should
    /// not be used if the invariant can be discovered by the optimizer on its
    /// own, or if it does not enable any significant optimizations.
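    ///
    /// A minimal sketch of direct use (requires the unstable `core_intrinsics`
    /// feature; the helper function is purely illustrative):
    ///
    /// ```
    /// #![feature(core_intrinsics)]
    /// use std::intrinsics::assume;
    ///
    /// fn get(v: &[u8], i: usize) -> u8 {
    ///     unsafe {
    ///         // Promise the optimizer the index is in bounds so the bounds
    ///         // check below may be elided. Violating this promise is
    ///         // undefined behavior.
    ///         assume(i < v.len());
    ///     }
    ///     v[i]
    /// }
    ///
    /// assert_eq!(get(&[1, 2, 3], 1), 2);
    /// ```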
    pub fn assume(b: bool);

    /// Hints to the compiler that the branch condition is likely to be true.
    /// Returns the value passed to it.
    ///
    /// Any use other than with `if` statements will probably not have an effect.
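    ///
    /// An illustrative sketch of direct use (requires the unstable
    /// `core_intrinsics` feature; the function name is made up for the
    /// example):
    ///
    /// ```
    /// #![feature(core_intrinsics)]
    /// use std::intrinsics::likely;
    ///
    /// fn double_nonzero(x: u32) -> u32 {
    ///     // Tell the optimizer the non-zero branch is the common case.
    ///     if unsafe { likely(x != 0) } {
    ///         x * 2
    ///     } else {
    ///         0
    ///     }
    /// }
    ///
    /// assert_eq!(double_nonzero(21), 42);
    /// ```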
    pub fn likely(b: bool) -> bool;

    /// Hints to the compiler that the branch condition is likely to be false.
    /// Returns the value passed to it.
    ///
    /// Any use other than with `if` statements will probably not have an effect.
    pub fn unlikely(b: bool) -> bool;

    /// Executes a breakpoint trap, for inspection by a debugger.
    pub fn breakpoint();

    /// The size of a type in bytes.
    ///
    /// More specifically, this is the offset in bytes between successive
    /// items of the same type, including alignment padding.
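    ///
    /// The stabilized interface for this intrinsic is `std::mem::size_of`;
    /// a small sketch of the padding behavior (the struct is illustrative):
    ///
    /// ```
    /// use std::mem;
    ///
    /// assert_eq!(mem::size_of::<u32>(), 4);
    ///
    /// // With a C layout, the 2-byte field is followed by padding so that
    /// // the whole struct occupies 8 bytes, not 6.
    /// #[repr(C)]
    /// struct Padded {
    ///     a: u16,
    ///     b: u32,
    /// }
    /// assert_eq!(mem::size_of::<Padded>(), 8);
    /// ```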
    pub fn size_of<T>() -> usize;

    /// Moves a value to an uninitialized memory location.
    ///
    /// Drop glue is not run on the destination.
    pub fn move_val_init<T>(dst: *mut T, src: T);

    pub fn min_align_of<T>() -> usize;
    pub fn pref_align_of<T>() -> usize;

    pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
    pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;

    /// Executes the destructor (if any) of the pointed-to value.
    ///
    /// This has two use cases:
    ///
    /// * It is *required* to use `drop_in_place` to drop unsized types like
    ///   trait objects, because they can't be read out onto the stack and
    ///   dropped normally.
    ///
    /// * It is friendlier to the optimizer to do this over `ptr::read` when
    ///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
    ///   as the compiler doesn't need to prove that it's sound to elide the
    ///   copy.
    ///
    /// # Undefined Behavior
    ///
    /// This has all the same safety problems as `ptr::read` with respect to
    /// invalid pointers, types, and double drops.
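    ///
    /// As a sketch, the stable `std::ptr::drop_in_place` wrapper can be used
    /// to run the destructor of a vector's last element in place:
    ///
    /// ```
    /// use std::ptr;
    ///
    /// let mut v = vec!["martian".to_string(), "human".to_string()];
    /// unsafe {
    ///     // Drop the last element in place, then shrink the length so the
    ///     // vector does not drop it a second time.
    ///     ptr::drop_in_place(&mut v[1]);
    ///     v.set_len(1);
    /// }
    /// assert_eq!(v.len(), 1);
    /// ```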
    #[stable(feature = "drop_in_place", since = "1.8.0")]
    pub fn drop_in_place<T: ?Sized>(to_drop: *mut T);

    /// Gets a static string slice containing the name of a type.
    pub fn type_name<T: ?Sized>() -> &'static str;

    /// Gets an identifier which is globally unique to the specified type. This
    /// function will return the same value for a type regardless of which
    /// crate it is invoked in.
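    ///
    /// The stable way to observe this value is through `std::any::TypeId`,
    /// which is built on this intrinsic:
    ///
    /// ```
    /// use std::any::TypeId;
    ///
    /// assert_eq!(TypeId::of::<i32>(), TypeId::of::<i32>());
    /// assert!(TypeId::of::<i32>() != TypeId::of::<u32>());
    /// ```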
    pub fn type_id<T: ?Sized + 'static>() -> u64;

    /// Creates a value initialized to zero.
    ///
    /// `init` is unsafe because it returns a zeroed-out datum,
    /// which is unsafe unless T is `Copy`. Also, even if T is
    /// `Copy`, an all-zero value may not correspond to any legitimate
    /// state for the type in question.
    pub fn init<T>() -> T;

    /// Creates an uninitialized value.
    ///
    /// `uninit` is unsafe because there is no guarantee of what its
    /// contents are. In particular its drop-flag may be set to any
    /// state, which means it may claim to be either dropped or
    /// undropped. In the general case one must use `ptr::write` to
    /// initialize memory previously set to the result of `uninit`.
    pub fn uninit<T>() -> T;

    /// Moves a value out of scope without running drop glue.
    pub fn forget<T>(_: T) -> ();

    /// Reinterprets the bits of a value of one type as another type.
    ///
    /// Both types must have the same size. Neither the original, nor the result,
    /// may be an [invalid value](../../nomicon/meet-safe-and-unsafe.html).
    ///
    /// `transmute` is semantically equivalent to a bitwise move of one type
    /// into another. It copies the bits from the source value into the
    /// destination value, then forgets the original. It's equivalent to C's
    /// `memcpy` under the hood, just like `transmute_copy`.
    ///
    /// `transmute` is **incredibly** unsafe. There are a vast number of ways to
    /// cause [undefined behavior][ub] with this function. `transmute` should be
    /// the absolute last resort.
    ///
    /// The [nomicon](../../nomicon/transmutes.html) has additional
    /// documentation.
    ///
    /// [ub]: ../../reference.html#behavior-considered-undefined
    ///
    /// # Examples
    ///
    /// There are a few things that `transmute` is really useful for.
    ///
    /// Getting the bitpattern of a floating point type (or, more generally,
    /// type punning, when `T` and `U` aren't pointers):
    ///
    /// ```
    /// let bitpattern = unsafe {
    ///     std::mem::transmute::<f32, u32>(1.0)
    /// };
    /// assert_eq!(bitpattern, 0x3F800000);
    /// ```
    ///
    /// Turning a pointer into a function pointer. This is *not* portable to
    /// machines where function pointers and data pointers have different sizes.
    ///
    /// ```
    /// fn foo() -> i32 {
    ///     0
    /// }
    /// let pointer = foo as *const ();
    /// let function = unsafe {
    ///     std::mem::transmute::<*const (), fn() -> i32>(pointer)
    /// };
    /// assert_eq!(function(), 0);
    /// ```
    ///
    /// Extending a lifetime, or shortening an invariant lifetime. This is
    /// advanced, very unsafe Rust!
    ///
    /// ```
    /// struct R<'a>(&'a i32);
    /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
    ///     std::mem::transmute::<R<'b>, R<'static>>(r)
    /// }
    ///
    /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
    ///                                              -> &'b mut R<'c> {
    ///     std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
    /// }
    /// ```
    ///
    /// # Alternatives
    ///
    /// Don't despair: many uses of `transmute` can be achieved through other means.
    /// Below are common applications of `transmute` which can be replaced with safer
    /// constructs.
    ///
    /// Turning a pointer into a `usize`:
    ///
    /// ```
    /// let ptr = &0;
    /// let ptr_num_transmute = unsafe {
    ///     std::mem::transmute::<&i32, usize>(ptr)
    /// };
    ///
    /// // Use an `as` cast instead
    /// let ptr_num_cast = ptr as *const i32 as usize;
    /// ```
    ///
    /// Turning a `*mut T` into an `&mut T`:
    ///
    /// ```
    /// let ptr: *mut i32 = &mut 0;
    /// let ref_transmuted = unsafe {
    ///     std::mem::transmute::<*mut i32, &mut i32>(ptr)
    /// };
    ///
    /// // Use a reborrow instead
    /// let ref_casted = unsafe { &mut *ptr };
    /// ```
    ///
    /// Turning an `&mut T` into an `&mut U`:
    ///
    /// ```
    /// let ptr = &mut 0;
    /// let val_transmuted = unsafe {
    ///     std::mem::transmute::<&mut i32, &mut u32>(ptr)
    /// };
    ///
    /// // Now, put together `as` and reborrowing - note the chaining of `as`
    /// // `as` is not transitive
    /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
    /// ```
    ///
    /// Turning an `&str` into an `&[u8]`:
    ///
    /// ```
    /// // this is not a good way to do this.
    /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
    /// assert_eq!(slice, &[82, 117, 115, 116]);
    ///
    /// // You could use `str::as_bytes`
    /// let slice = "Rust".as_bytes();
    /// assert_eq!(slice, &[82, 117, 115, 116]);
    ///
    /// // Or, just use a byte string, if you have control over the string
    /// // literal
    /// assert_eq!(b"Rust", &[82, 117, 115, 116]);
    /// ```
    ///
    /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`:
    ///
    /// ```
    /// let store = [0, 1, 2, 3];
    /// let mut v_orig = store.iter().collect::<Vec<&i32>>();
    ///
    /// // Using transmute: this is Undefined Behavior, and a bad idea.
    /// // However, it is no-copy.
    /// let v_transmuted = unsafe {
    ///     std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(
    ///         v_orig.clone())
    /// };
    ///
    /// // This is the suggested, safe way.
    /// // It does copy the entire vector, though, into a new array.
    /// let v_collected = v_orig.clone()
    ///                         .into_iter()
    ///                         .map(|r| Some(r))
    ///                         .collect::<Vec<Option<&i32>>>();
    ///
    /// // The no-copy, unsafe way, still using transmute, but not UB.
    /// // This is equivalent to the original, but safer, and reuses the
    /// // same Vec internals. Therefore the new inner type must have the
    /// // exact same size, and the same or lesser alignment, as the old
    /// // type. The same caveats exist for this method as transmute, for
    /// // the original inner type (`&i32`) to the converted inner type
    /// // (`Option<&i32>`), so read the nomicon pages linked above.
    /// let v_from_raw = unsafe {
    ///     Vec::from_raw_parts(v_orig.as_mut_ptr(),
    ///                         v_orig.len(),
    ///                         v_orig.capacity())
    /// };
    /// std::mem::forget(v_orig);
    /// ```
    ///
    /// Implementing `split_at_mut`:
    ///
    /// ```
    /// use std::{slice, mem};
    ///
    /// // There are multiple ways to do this; and there are multiple problems
    /// // with the following, transmute, way.
    /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
    ///                              -> (&mut [T], &mut [T]) {
    ///     let len = slice.len();
    ///     assert!(mid <= len);
    ///     unsafe {
    ///         let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
    ///         // first: transmute is not typesafe; all it checks is that T and
    ///         // U are of the same size. Second, right here, you have two
    ///         // mutable references pointing to the same memory.
    ///         (&mut slice[0..mid], &mut slice2[mid..len])
    ///     }
    /// }
    ///
    /// // This gets rid of the typesafety problems; `&mut *` will *only* give
    /// // you an `&mut T` from an `&mut T` or `*mut T`.
    /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
    ///                          -> (&mut [T], &mut [T]) {
    ///     let len = slice.len();
    ///     assert!(mid <= len);
    ///     unsafe {
    ///         let slice2 = &mut *(slice as *mut [T]);
    ///         // however, you still have two mutable references pointing to
    ///         // the same memory.
    ///         (&mut slice[0..mid], &mut slice2[mid..len])
    ///     }
    /// }
    ///
    /// // This is how the standard library does it. This is the best method, if
    /// // you need to do something like this
    /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
    ///                       -> (&mut [T], &mut [T]) {
    ///     let len = slice.len();
    ///     assert!(mid <= len);
    ///     unsafe {
    ///         let ptr = slice.as_mut_ptr();
    ///         // This now has three mutable references pointing at the same
    ///         // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
    ///         // `slice` is never used after `let ptr = ...`, and so one can
    ///         // treat it as "dead", and therefore, you only have two real
    ///         // mutable slices.
    ///         (slice::from_raw_parts_mut(ptr, mid),
    ///          slice::from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn transmute<T, U>(e: T) -> U;

    /// Returns `true` if the actual type given as `T` requires drop
    /// glue; returns `false` if the actual type provided for `T`
    /// implements `Copy`.
    ///
    /// If the actual type neither requires drop glue nor implements
    /// `Copy`, then this intrinsic may return `true` or `false`.
    pub fn needs_drop<T>() -> bool;

    /// Calculates the offset from a pointer.
    ///
    /// This is implemented as an intrinsic to avoid converting to and from an
    /// integer, since the conversion would throw away aliasing information.
    ///
    /// # Safety
    ///
    /// Both the starting and resulting pointer must be either in bounds or one
    /// byte past the end of an allocated object. If either pointer is out of
    /// bounds or arithmetic overflow occurs then any further use of the
    /// returned value will result in undefined behavior.
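    ///
    /// The stable `offset` method on raw pointers is a thin wrapper around
    /// this intrinsic; a minimal sketch:
    ///
    /// ```
    /// let data = [1u8, 2, 3, 4];
    /// let ptr: *const u8 = data.as_ptr();
    /// unsafe {
    ///     // Both the starting and the resulting pointer are in bounds.
    ///     assert_eq!(*ptr.offset(2), 3);
    /// }
    /// ```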
    pub fn offset<T>(dst: *const T, offset: isize) -> *const T;

    /// Calculates the offset from a pointer, potentially wrapping.
    ///
    /// This is implemented as an intrinsic to avoid converting to and from an
    /// integer, since the conversion inhibits certain optimizations.
    ///
    /// # Safety
    ///
    /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
    /// resulting pointer to point into or one byte past the end of an allocated
    /// object, and it wraps with two's complement arithmetic. The resulting
    /// value is not necessarily valid to be used to actually access memory.
    pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;

    /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
    /// and destination may *not* overlap.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behavior for source and destination to
    /// overlap. Care must also be taken with the ownership of `src` and
    /// `dst`. This method semantically moves the values of `src` into `dst`.
    /// However it does not drop the contents of `dst`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// A safe swap function:
    ///
    /// ```
    /// use std::mem;
    /// use std::ptr;
    ///
    /// # #[allow(dead_code)]
    /// fn swap<T>(x: &mut T, y: &mut T) {
    ///     unsafe {
    ///         // Give ourselves some scratch space to work with
    ///         let mut t: T = mem::uninitialized();
    ///
    ///         // Perform the swap, `&mut` pointers never alias
    ///         ptr::copy_nonoverlapping(x, &mut t, 1);
    ///         ptr::copy_nonoverlapping(y, x, 1);
    ///         ptr::copy_nonoverlapping(&t, y, 1);
    ///
    ///         // y and t now point to the same thing, but we need to completely
    ///         // forget `t` because it's no longer relevant.
    ///         mem::forget(t);
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);

    /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
    /// and destination may overlap.
    ///
    /// `copy` is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `src` and `dst`.
    /// This method semantically moves the values of `src` into `dst`.
    /// However it does not drop the contents of `dst`, or prevent the contents of `src`
    /// from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// use std::ptr;
    ///
    /// # #[allow(dead_code)]
    /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr::copy(ptr, dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    ///
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn copy<T>(src: *const T, dst: *mut T, count: usize);

    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `dst` to `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    ///
    /// let mut vec = vec![0; 4];
    /// unsafe {
    ///     let vec_ptr = vec.as_mut_ptr();
    ///     ptr::write_bytes(vec_ptr, b'a', 2);
    /// }
    /// assert_eq!(vec, [b'a', b'a', 0, 0]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);

    /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
    /// a size of `count * size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`.
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
                                                  count: usize);
    /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
    /// a size of `count * size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`.
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
    /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
    /// size of `count * size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`.
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);

    /// Perform a volatile load from the `src` pointer.
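    ///
    /// The stable wrappers for these two operations are
    /// `std::ptr::read_volatile` and `std::ptr::write_volatile`; a small
    /// sketch:
    ///
    /// ```
    /// use std::ptr;
    ///
    /// let mut x = 0u32;
    /// unsafe {
    ///     // Neither access will be elided or reordered across other
    ///     // volatile accesses by the compiler.
    ///     ptr::write_volatile(&mut x, 42);
    ///     assert_eq!(ptr::read_volatile(&x), 42);
    /// }
    /// ```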
    pub fn volatile_load<T>(src: *const T) -> T;
    /// Perform a volatile store to the `dst` pointer.
    pub fn volatile_store<T>(dst: *mut T, val: T);

    /// Returns the square root of an `f32`
    pub fn sqrtf32(x: f32) -> f32;
    /// Returns the square root of an `f64`
    pub fn sqrtf64(x: f64) -> f64;

    /// Raises an `f32` to an integer power.
    pub fn powif32(a: f32, x: i32) -> f32;
    /// Raises an `f64` to an integer power.
    pub fn powif64(a: f64, x: i32) -> f64;

    /// Returns the sine of an `f32`.
    pub fn sinf32(x: f32) -> f32;
    /// Returns the sine of an `f64`.
    pub fn sinf64(x: f64) -> f64;

    /// Returns the cosine of an `f32`.
    pub fn cosf32(x: f32) -> f32;
    /// Returns the cosine of an `f64`.
    pub fn cosf64(x: f64) -> f64;

    /// Raises an `f32` to an `f32` power.
    pub fn powf32(a: f32, x: f32) -> f32;
    /// Raises an `f64` to an `f64` power.
    pub fn powf64(a: f64, x: f64) -> f64;

    /// Returns the exponential of an `f32`.
    pub fn expf32(x: f32) -> f32;
    /// Returns the exponential of an `f64`.
    pub fn expf64(x: f64) -> f64;

    /// Returns 2 raised to the power of an `f32`.
    pub fn exp2f32(x: f32) -> f32;
    /// Returns 2 raised to the power of an `f64`.
    pub fn exp2f64(x: f64) -> f64;

    /// Returns the natural logarithm of an `f32`.
    pub fn logf32(x: f32) -> f32;
    /// Returns the natural logarithm of an `f64`.
    pub fn logf64(x: f64) -> f64;

    /// Returns the base 10 logarithm of an `f32`.
    pub fn log10f32(x: f32) -> f32;
    /// Returns the base 10 logarithm of an `f64`.
    pub fn log10f64(x: f64) -> f64;

    /// Returns the base 2 logarithm of an `f32`.
    pub fn log2f32(x: f32) -> f32;
    /// Returns the base 2 logarithm of an `f64`.
    pub fn log2f64(x: f64) -> f64;

    /// Returns `a * b + c` for `f32` values.
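    ///
    /// Surfaced on stable as `f32::mul_add`; for example:
    ///
    /// ```
    /// // Computed as a single fused multiply-add with one rounding step.
    /// assert_eq!(2.0f32.mul_add(3.0, 4.0), 10.0);
    /// ```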
    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
    /// Returns `a * b + c` for `f64` values.
    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

    /// Returns the absolute value of an `f32`.
    pub fn fabsf32(x: f32) -> f32;
    /// Returns the absolute value of an `f64`.
    pub fn fabsf64(x: f64) -> f64;

    /// Copies the sign from `y` to `x` for `f32` values.
    pub fn copysignf32(x: f32, y: f32) -> f32;
    /// Copies the sign from `y` to `x` for `f64` values.
    pub fn copysignf64(x: f64, y: f64) -> f64;

    /// Returns the largest integer less than or equal to an `f32`.
    pub fn floorf32(x: f32) -> f32;
    /// Returns the largest integer less than or equal to an `f64`.
    pub fn floorf64(x: f64) -> f64;

    /// Returns the smallest integer greater than or equal to an `f32`.
    pub fn ceilf32(x: f32) -> f32;
    /// Returns the smallest integer greater than or equal to an `f64`.
    pub fn ceilf64(x: f64) -> f64;

    /// Returns the integer part of an `f32`.
    pub fn truncf32(x: f32) -> f32;
    /// Returns the integer part of an `f64`.
    pub fn truncf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
    /// if the argument is not an integer.
    pub fn rintf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
    /// if the argument is not an integer.
    pub fn rintf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`.
    pub fn nearbyintf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`.
    pub fn nearbyintf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
    pub fn roundf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
    pub fn roundf64(x: f64) -> f64;

    /// Float addition that allows optimizations based on algebraic rules.
    /// May assume inputs are finite.
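    ///
    /// A sketch of direct use (requires the unstable `core_intrinsics`
    /// feature):
    ///
    /// ```
    /// #![feature(core_intrinsics)]
    /// use std::intrinsics::fadd_fast;
    ///
    /// // The optimizer may freely reassociate this addition; for exactly
    /// // representable values the result is still 3.0.
    /// let sum = unsafe { fadd_fast(1.0f32, 2.0f32) };
    /// assert_eq!(sum, 3.0);
    /// ```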
    pub fn fadd_fast<T>(a: T, b: T) -> T;

    /// Float subtraction that allows optimizations based on algebraic rules.
    /// May assume inputs are finite.
    pub fn fsub_fast<T>(a: T, b: T) -> T;

    /// Float multiplication that allows optimizations based on algebraic rules.
    /// May assume inputs are finite.
    pub fn fmul_fast<T>(a: T, b: T) -> T;

    /// Float division that allows optimizations based on algebraic rules.
    /// May assume inputs are finite.
    pub fn fdiv_fast<T>(a: T, b: T) -> T;

    /// Float remainder that allows optimizations based on algebraic rules.
    /// May assume inputs are finite.
    pub fn frem_fast<T>(a: T, b: T) -> T;

    /// Returns the number of bits set in an integer type `T`
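    ///
    /// The stable counterparts of this and the neighbouring bit intrinsics
    /// are the `count_ones`, `leading_zeros`, `trailing_zeros` and
    /// `swap_bytes` methods on the primitive integer types:
    ///
    /// ```
    /// assert_eq!(0b0110_1000u8.count_ones(), 3);     // ctpop
    /// assert_eq!(0b0110_1000u8.leading_zeros(), 1);  // ctlz
    /// assert_eq!(0b0110_1000u8.trailing_zeros(), 3); // cttz
    /// assert_eq!(0x1234u16.swap_bytes(), 0x3412);    // bswap
    /// ```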
    pub fn ctpop<T>(x: T) -> T;

    /// Returns the number of leading zero bits in an integer type `T`.
    pub fn ctlz<T>(x: T) -> T;

    /// Returns the number of trailing zero bits in an integer type `T`.
    pub fn cttz<T>(x: T) -> T;

    /// Reverses the bytes in an integer type `T`.
    pub fn bswap<T>(x: T) -> T;

    /// Performs checked integer addition.
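    ///
    /// The stable `overflowing_*` methods on the integer types are built on
    /// this intrinsic and the two below; for example:
    ///
    /// ```
    /// // The result wraps around and the flag reports the overflow.
    /// assert_eq!(250u8.overflowing_add(10), (4, true));
    /// assert_eq!(5u8.overflowing_sub(10), (251, true));
    /// ```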
    pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs checked integer subtraction.
    pub fn sub_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs checked integer multiplication.
    pub fn mul_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs an unchecked division, resulting in undefined behavior
    /// when `y == 0`, or when `x == T::min_value()` and `y == -1`.
    pub fn unchecked_div<T>(x: T, y: T) -> T;
    /// Returns the remainder of an unchecked division, resulting in
    /// undefined behavior when `y == 0`, or when `x == T::min_value()` and `y == -1`.
    pub fn unchecked_rem<T>(x: T, y: T) -> T;

    /// Returns (a + b) mod 2^N, where N is the width of T in bits.
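    ///
    /// This is the behavior exposed on stable by the `wrapping_*` methods;
    /// for example:
    ///
    /// ```
    /// // 200 + 100 = 300, which is 44 modulo 2^8.
    /// assert_eq!(200u8.wrapping_add(100), 44);
    /// ```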
    pub fn overflowing_add<T>(a: T, b: T) -> T;
    /// Returns (a - b) mod 2^N, where N is the width of T in bits.
    pub fn overflowing_sub<T>(a: T, b: T) -> T;
    /// Returns (a * b) mod 2^N, where N is the width of T in bits.
    pub fn overflowing_mul<T>(a: T, b: T) -> T;

    /// Returns the value of the discriminant for the variant in `v`,
    /// cast to a `u64`; if `T` has no discriminant, returns 0.
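    ///
    /// A sketch of direct use (requires the unstable `core_intrinsics`
    /// feature; the enum is purely illustrative):
    ///
    /// ```
    /// #![feature(core_intrinsics)]
    /// use std::intrinsics::discriminant_value;
    ///
    /// enum Planet { Mars, Earth }
    ///
    /// // Fieldless variants are numbered from zero by default.
    /// assert_eq!(unsafe { discriminant_value(&Planet::Mars) }, 0);
    /// assert_eq!(unsafe { discriminant_value(&Planet::Earth) }, 1);
    /// ```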
    pub fn discriminant_value<T>(v: &T) -> u64;

    /// Rust's "try catch" construct which invokes the function pointer `f` with
    /// the data pointer `data`.
    ///
    /// The third pointer is a target-specific data pointer which is filled in
    /// with the specifics of the exception that occurred. For example, on Unix
    /// platforms this is a `*mut *mut T` which is filled in by the compiler and
    /// on MSVC it's `*mut [usize; 2]`. For more information see the compiler's
    /// source as well as std's catch implementation.
    pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32;
}