// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc_trans/trans/intrinsic.rs.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed not to be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: http://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//!   take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//!   take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//!   guaranteed to happen in order. This is the standard mode for working
//!   with atomic types and is equivalent to Java's `volatile`.
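//!
//! As an illustrative sketch (not part of the original docs), the stable
//! `std::sync::atomic` types expose these orderings; a release store pairs
//! with an acquire load:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! let flag = AtomicUsize::new(0);
//! // Writes made before this store are visible to a thread that performs an
//! // acquire load and observes the value 1.
//! flag.store(1, Ordering::Release);
//! assert_eq!(flag.load(Ordering::Acquire), 1);
//! ```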
#![unstable(feature = "core")]
#![allow(missing_docs)]

use marker::Sized;

extern "rust-intrinsic" {

    // NB: These intrinsics take unsafe pointers because they mutate aliased
    // memory, which is not valid for either `&` or `&mut`.

    pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;

    pub fn atomic_load<T>(src: *const T) -> T;
    pub fn atomic_load_acq<T>(src: *const T) -> T;
    pub fn atomic_load_relaxed<T>(src: *const T) -> T;
    pub fn atomic_load_unordered<T>(src: *const T) -> T;

    pub fn atomic_store<T>(dst: *mut T, val: T);
    pub fn atomic_store_rel<T>(dst: *mut T, val: T);
    pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);
    pub fn atomic_store_unordered<T>(dst: *mut T, val: T);

    pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;

    pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
}

extern "rust-intrinsic" {

    pub fn atomic_fence();
    pub fn atomic_fence_acq();
    pub fn atomic_fence_rel();
    pub fn atomic_fence_acqrel();

    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the compiler,
    /// but no instructions will be emitted for it. This is appropriate for operations
    /// on the same thread that may be preempted, such as when interacting with signal
    /// handlers.
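    ///
    /// A minimal illustrative sketch (not from the original docs), assuming the
    /// unstable `core` feature gate:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::intrinsics;
    ///
    /// // Prevents the compiler from reordering memory accesses across this
    /// // point; no fence instruction is emitted.
    /// unsafe { intrinsics::atomic_singlethreadfence(); }
    /// ```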
    #[cfg(not(stage0))] // SNAP 857ef6e
    pub fn atomic_singlethreadfence();
    #[cfg(not(stage0))] // SNAP 857ef6e
    pub fn atomic_singlethreadfence_acq();
    #[cfg(not(stage0))] // SNAP 857ef6e
    pub fn atomic_singlethreadfence_rel();
    #[cfg(not(stage0))] // SNAP 857ef6e
    pub fn atomic_singlethreadfence_acqrel();

    /// Aborts the execution of the process.
    pub fn abort() -> !;

    /// Tells LLVM that this point in the code is not reachable,
    /// enabling further optimizations.
    ///
    /// NB: This is very different from the `unreachable!()` macro!
    pub fn unreachable() -> !;

    /// Informs the optimizer that a condition is always true.
    /// If the condition is false, the behavior is undefined.
    ///
    /// No code is generated for this intrinsic, but the optimizer will try
    /// to preserve it (and its condition) between passes, which may interfere
    /// with optimization of surrounding code and reduce performance. It should
    /// not be used if the invariant can be discovered by the optimizer on its
    /// own, or if it does not enable any significant optimizations.
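    ///
    /// A minimal illustrative sketch (not from the original docs), assuming the
    /// unstable `core` feature gate:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::intrinsics;
    ///
    /// let x = 5u32;
    /// unsafe {
    ///     // Sound only because the condition actually holds; asserting a
    ///     // false condition here would be undefined behavior.
    ///     intrinsics::assume(x < 10);
    /// }
    /// assert_eq!(x / 16, 0);
    /// ```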
    pub fn assume(b: bool);

    /// Executes a breakpoint trap, for inspection by a debugger.
    pub fn breakpoint();

    /// The size of a type in bytes.
    ///
    /// This is the exact number of bytes in memory taken up by a
    /// value of the given type. In other words, a memset of this size
    /// would *exactly* overwrite a value. When laid out in vectors
    /// and structures there may be additional padding between
    /// elements.
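    ///
    /// The stable wrapper for this intrinsic is `std::mem::size_of`; a minimal
    /// sketch:
    ///
    /// ```
    /// use std::mem;
    ///
    /// assert_eq!(mem::size_of::<u8>(), 1);
    /// assert_eq!(mem::size_of::<u32>(), 4);
    /// // Padding between the fields makes the tuple larger than 5 bytes.
    /// assert_eq!(mem::size_of::<(u8, u32)>(), 8);
    /// ```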
    pub fn size_of<T>() -> usize;

    /// Moves a value to an uninitialized memory location.
    ///
    /// Drop glue is not run on the destination.
    pub fn move_val_init<T>(dst: &mut T, src: T);

    pub fn min_align_of<T>() -> usize;
    pub fn pref_align_of<T>() -> usize;

    #[cfg(not(stage0))]
    pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
    #[cfg(not(stage0))]
    pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;
    #[cfg(not(stage0))]
    pub fn drop_in_place<T: ?Sized>(_: *mut T);

    /// Gets a static string slice containing the name of a type.
    pub fn type_name<T: ?Sized>() -> &'static str;

    /// Gets an identifier which is globally unique to the specified type. This
    /// function will return the same value for a type regardless of whichever
    /// crate it is invoked in.
    pub fn type_id<T: ?Sized + 'static>() -> u64;

    /// Creates a value initialized so that its drop flag,
    /// if any, says that it has been dropped.
    ///
    /// `init_dropped` is unsafe because it returns a datum with all
    /// of its bytes set to the drop flag, which generally does not
    /// correspond to a valid value.
    ///
    /// This intrinsic is likely to be deprecated in the future when
    /// Rust moves to non-zeroing dynamic drop (and thus removes the
    /// embedded drop flags that are being established by this
    /// intrinsic).
    pub fn init_dropped<T>() -> T;

    /// Creates a value initialized to zero.
    ///
    /// `init` is unsafe because it returns a zeroed-out datum,
    /// which is unsafe unless T is `Copy`. Also, even if T is
    /// `Copy`, an all-zero value may not correspond to any legitimate
    /// state for the type in question.
    pub fn init<T>() -> T;

    /// Creates an uninitialized value.
    ///
    /// `uninit` is unsafe because there is no guarantee of what its
    /// contents are. In particular its drop-flag may be set to any
    /// state, which means it may claim either dropped or
    /// undropped. In the general case one must use `ptr::write` to
    /// initialize memory previously set to the result of `uninit`.
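    ///
    /// As an illustrative sketch (not from the original docs) using the stable
    /// `mem::uninitialized` wrapper, the memory is later made valid with
    /// `ptr::write`:
    ///
    /// ```
    /// use std::mem;
    /// use std::ptr;
    ///
    /// unsafe {
    ///     // The contents are indeterminate until `ptr::write` runs.
    ///     let mut x: u32 = mem::uninitialized();
    ///     ptr::write(&mut x, 42);
    ///     assert_eq!(x, 42);
    /// }
    /// ```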
    pub fn uninit<T>() -> T;

    /// Moves a value out of scope without running drop glue.
    pub fn forget<T>(_: T) -> ();

    /// Unsafely transforms a value of one type into a value of another type.
    ///
    /// Both types must have the same size.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::mem;
    ///
    /// let v: &[u8] = unsafe { mem::transmute("L") };
    /// assert!(v == [76]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn transmute<T,U>(e: T) -> U;

    /// Gives the address for the return value of the enclosing function.
    ///
    /// Using this intrinsic in a function that does not use an out pointer
    /// will trigger a compiler error.
    pub fn return_address() -> *const u8;

    /// Returns `true` if the actual type given as `T` requires drop
    /// glue; returns `false` if the actual type provided for `T`
    /// implements `Copy`.
    ///
    /// If the actual type neither requires drop glue nor implements
    /// `Copy`, then it may return `true` or `false`.
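    ///
    /// A minimal illustrative sketch (not from the original docs), assuming the
    /// unstable `core` feature gate:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::intrinsics;
    ///
    /// unsafe {
    ///     // `u32` is `Copy`, so it needs no drop glue; `String` owns heap
    ///     // memory and does.
    ///     assert!(!intrinsics::needs_drop::<u32>());
    ///     assert!(intrinsics::needs_drop::<String>());
    /// }
    /// ```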
    pub fn needs_drop<T>() -> bool;

    /// Calculates the offset from a pointer.
    ///
    /// This is implemented as an intrinsic to avoid converting to and from an
    /// integer, since the conversion would throw away aliasing information.
    ///
    /// # Safety
    ///
    /// Both the starting and resulting pointer must be either in bounds or one
    /// byte past the end of an allocated object. If either pointer is out of
    /// bounds or arithmetic overflow occurs then any further use of the
    /// returned value will result in undefined behavior.
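    ///
    /// The stable `offset` method on raw pointers wraps this intrinsic; a
    /// minimal sketch:
    ///
    /// ```
    /// let xs = [1u32, 2, 3, 4];
    /// let ptr = xs.as_ptr();
    /// unsafe {
    ///     // Stays within the bounds of `xs`, so the offset is well defined.
    ///     assert_eq!(*ptr.offset(2), 3);
    /// }
    /// ```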
    pub fn offset<T>(dst: *const T, offset: isize) -> *const T;

    /// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
    /// and destination may *not* overlap.
    ///
    /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
    ///
    /// # Safety
    ///
    /// Beyond requiring that the program must be allowed to access both regions
    /// of memory, it is Undefined Behaviour for source and destination to
    /// overlap. Care must also be taken with the ownership of `src` and
    /// `dst`. This method semantically moves the values of `src` into `dst`.
    /// However it does not drop the contents of `dst`, or prevent the contents
    /// of `src` from being dropped or used.
    ///
    /// # Examples
    ///
    /// A safe swap function:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::mem;
    /// use std::ptr;
    ///
    /// fn swap<T>(x: &mut T, y: &mut T) {
    ///     unsafe {
    ///         // Give ourselves some scratch space to work with
    ///         let mut t: T = mem::uninitialized();
    ///
    ///         // Perform the swap, `&mut` pointers never alias
    ///         ptr::copy_nonoverlapping(x, &mut t, 1);
    ///         ptr::copy_nonoverlapping(y, x, 1);
    ///         ptr::copy_nonoverlapping(&t, y, 1);
    ///
    ///         // y and t now point to the same thing, but we need to completely forget `t`
    ///         // because it's no longer relevant.
    ///         mem::forget(t);
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);

    /// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
    /// and destination may overlap.
    ///
    /// `copy` is semantically equivalent to C's `memmove`.
    ///
    /// # Safety
    ///
    /// Care must be taken with the ownership of `src` and `dst`.
    /// This method semantically moves the values of `src` into `dst`.
    /// However it does not drop the contents of `dst`, or prevent the contents of `src`
    /// from being dropped or used.
    ///
    /// # Examples
    ///
    /// Efficiently create a Rust vector from an unsafe buffer:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::ptr;
    ///
    /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
    ///     let mut dst = Vec::with_capacity(elts);
    ///     dst.set_len(elts);
    ///     ptr::copy(ptr, dst.as_mut_ptr(), elts);
    ///     dst
    /// }
    /// ```
    ///
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn copy<T>(src: *const T, dst: *mut T, count: usize);

    /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
    /// bytes of memory starting at `dst` to `val`.
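    ///
    /// The stable wrapper is `std::ptr::write_bytes`; a minimal sketch:
    ///
    /// ```
    /// use std::ptr;
    ///
    /// let mut bytes = [0xffu8; 4];
    /// unsafe {
    ///     // Zero all four bytes through the raw pointer.
    ///     ptr::write_bytes(bytes.as_mut_ptr(), 0, 4);
    /// }
    /// assert_eq!(bytes, [0u8; 4]);
    /// ```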
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);

    /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
    /// a size of `count` * `size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
                                                  count: usize);
    /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
    /// a size of `count` * `size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
    /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
    /// size of `count` * `size_of::<T>()` and an alignment of
    /// `min_align_of::<T>()`.
    ///
    /// The volatile parameter is set to `true`, so it will not be optimized out.
    pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);

    /// Performs a volatile load from the `src` pointer.
    pub fn volatile_load<T>(src: *const T) -> T;
    /// Performs a volatile store to the `dst` pointer.
    pub fn volatile_store<T>(dst: *mut T, val: T);

    /// Returns the square root of an `f32`
    pub fn sqrtf32(x: f32) -> f32;
    /// Returns the square root of an `f64`
    pub fn sqrtf64(x: f64) -> f64;

    /// Raises an `f32` to an integer power.
    pub fn powif32(a: f32, x: i32) -> f32;
    /// Raises an `f64` to an integer power.
    pub fn powif64(a: f64, x: i32) -> f64;

    /// Returns the sine of an `f32`.
    pub fn sinf32(x: f32) -> f32;
    /// Returns the sine of an `f64`.
    pub fn sinf64(x: f64) -> f64;

    /// Returns the cosine of an `f32`.
    pub fn cosf32(x: f32) -> f32;
    /// Returns the cosine of an `f64`.
    pub fn cosf64(x: f64) -> f64;

    /// Raises an `f32` to an `f32` power.
    pub fn powf32(a: f32, x: f32) -> f32;
    /// Raises an `f64` to an `f64` power.
    pub fn powf64(a: f64, x: f64) -> f64;

    /// Returns the exponential of an `f32`.
    pub fn expf32(x: f32) -> f32;
    /// Returns the exponential of an `f64`.
    pub fn expf64(x: f64) -> f64;

    /// Returns 2 raised to the power of an `f32`.
    pub fn exp2f32(x: f32) -> f32;
    /// Returns 2 raised to the power of an `f64`.
    pub fn exp2f64(x: f64) -> f64;

    /// Returns the natural logarithm of an `f32`.
    pub fn logf32(x: f32) -> f32;
    /// Returns the natural logarithm of an `f64`.
    pub fn logf64(x: f64) -> f64;

    /// Returns the base 10 logarithm of an `f32`.
    pub fn log10f32(x: f32) -> f32;
    /// Returns the base 10 logarithm of an `f64`.
    pub fn log10f64(x: f64) -> f64;

    /// Returns the base 2 logarithm of an `f32`.
    pub fn log2f32(x: f32) -> f32;
    /// Returns the base 2 logarithm of an `f64`.
    pub fn log2f64(x: f64) -> f64;

    /// Returns `a * b + c` for `f32` values.
    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
    /// Returns `a * b + c` for `f64` values.
    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

    /// Returns the absolute value of an `f32`.
    pub fn fabsf32(x: f32) -> f32;
    /// Returns the absolute value of an `f64`.
    pub fn fabsf64(x: f64) -> f64;

    /// Copies the sign from `y` to `x` for `f32` values.
    pub fn copysignf32(x: f32, y: f32) -> f32;
    /// Copies the sign from `y` to `x` for `f64` values.
    pub fn copysignf64(x: f64, y: f64) -> f64;

    /// Returns the largest integer less than or equal to an `f32`.
    pub fn floorf32(x: f32) -> f32;
    /// Returns the largest integer less than or equal to an `f64`.
    pub fn floorf64(x: f64) -> f64;

    /// Returns the smallest integer greater than or equal to an `f32`.
    pub fn ceilf32(x: f32) -> f32;
    /// Returns the smallest integer greater than or equal to an `f64`.
    pub fn ceilf64(x: f64) -> f64;

    /// Returns the integer part of an `f32`.
    pub fn truncf32(x: f32) -> f32;
    /// Returns the integer part of an `f64`.
    pub fn truncf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
    /// if the argument is not an integer.
    pub fn rintf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
    /// if the argument is not an integer.
    pub fn rintf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`.
    pub fn nearbyintf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`.
    pub fn nearbyintf64(x: f64) -> f64;

    /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
    pub fn roundf32(x: f32) -> f32;
    /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
    pub fn roundf64(x: f64) -> f64;

    /// Returns the number of bits set in a `u8`.
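    ///
    /// The stable counterpart is the `count_ones` method on the integer types;
    /// a minimal sketch:
    ///
    /// ```
    /// // Four bits are set in this value.
    /// assert_eq!(0b1011_0010u8.count_ones(), 4);
    /// ```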
    pub fn ctpop8(x: u8) -> u8;
    /// Returns the number of bits set in a `u16`.
    pub fn ctpop16(x: u16) -> u16;
    /// Returns the number of bits set in a `u32`.
    pub fn ctpop32(x: u32) -> u32;
    /// Returns the number of bits set in a `u64`.
    pub fn ctpop64(x: u64) -> u64;

    /// Returns the number of leading bits unset in a `u8`.
    pub fn ctlz8(x: u8) -> u8;
    /// Returns the number of leading bits unset in a `u16`.
    pub fn ctlz16(x: u16) -> u16;
    /// Returns the number of leading bits unset in a `u32`.
    pub fn ctlz32(x: u32) -> u32;
    /// Returns the number of leading bits unset in a `u64`.
    pub fn ctlz64(x: u64) -> u64;

    /// Returns the number of trailing bits unset in a `u8`.
    pub fn cttz8(x: u8) -> u8;
    /// Returns the number of trailing bits unset in a `u16`.
    pub fn cttz16(x: u16) -> u16;
    /// Returns the number of trailing bits unset in a `u32`.
    pub fn cttz32(x: u32) -> u32;
    /// Returns the number of trailing bits unset in a `u64`.
    pub fn cttz64(x: u64) -> u64;

    /// Reverses the bytes in a `u16`.
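    ///
    /// The stable counterpart is the `swap_bytes` method on the integer types;
    /// a minimal sketch:
    ///
    /// ```
    /// // The two bytes of the value trade places.
    /// assert_eq!(0x1234u16.swap_bytes(), 0x3412);
    /// ```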
    pub fn bswap16(x: u16) -> u16;
    /// Reverses the bytes in a `u32`.
    pub fn bswap32(x: u32) -> u32;
    /// Reverses the bytes in a `u64`.
    pub fn bswap64(x: u64) -> u64;

    /// Performs checked `i8` addition.
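    ///
    /// The stable `checked_add` method reports the same overflow condition
    /// through an `Option`; a minimal sketch:
    ///
    /// ```
    /// // 100 + 27 fits in an `i8`; 100 + 28 overflows.
    /// assert_eq!(100i8.checked_add(27), Some(127));
    /// assert_eq!(100i8.checked_add(28), None);
    /// ```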
    pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` addition.
    pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` addition.
    pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` addition.
    pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` addition.
    pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` addition.
    pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` addition.
    pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` addition.
    pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Performs checked `i8` subtraction.
    pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` subtraction.
    pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` subtraction.
    pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` subtraction.
    pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` subtraction.
    pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` subtraction.
    pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` subtraction.
    pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` subtraction.
    pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Performs checked `i8` multiplication.
    pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` multiplication.
    pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` multiplication.
    pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` multiplication.
    pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` multiplication.
    pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` multiplication.
    pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` multiplication.
    pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` multiplication.
    pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Returns (a + b) mod 2^N, where N is the width of T in bits.
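    ///
    /// The stable counterpart is the `wrapping_add` method; a minimal sketch:
    ///
    /// ```
    /// // 255 + 1 wraps around to 0 for a `u8`.
    /// assert_eq!(255u8.wrapping_add(1), 0);
    /// ```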
    pub fn overflowing_add<T>(a: T, b: T) -> T;
    /// Returns (a - b) mod 2^N, where N is the width of T in bits.
    pub fn overflowing_sub<T>(a: T, b: T) -> T;
    /// Returns (a * b) mod 2^N, where N is the width of T in bits.
    pub fn overflowing_mul<T>(a: T, b: T) -> T;

    /// Returns the value of the discriminant for the variant in `v`,
    /// cast to a `u64`; if `T` has no discriminant, returns 0.
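    ///
    /// A minimal illustrative sketch (not from the original docs), assuming the
    /// unstable `core` feature gate:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::intrinsics;
    ///
    /// enum Shape { Circle, Square }
    ///
    /// unsafe {
    ///     // C-like enums use consecutive discriminants starting at 0.
    ///     assert_eq!(intrinsics::discriminant_value(&Shape::Circle), 0);
    ///     assert_eq!(intrinsics::discriminant_value(&Shape::Square), 1);
    /// }
    /// ```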
    pub fn discriminant_value<T>(v: &T) -> u64;
}

#[cfg(not(stage0))]
extern "rust-intrinsic" {
    /// Performs an unchecked signed division, which results in undefined behavior
    /// in cases where y == 0, or x == int::MIN and y == -1
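    ///
    /// A minimal illustrative sketch (not from the original docs), assuming the
    /// unstable `core` feature gate:
    ///
    /// ```
    /// # #![feature(core)]
    /// use std::intrinsics;
    ///
    /// // Neither undefined case applies: y != 0 and the operands are not
    /// // int::MIN / -1.
    /// let q = unsafe { intrinsics::unchecked_sdiv(10i32, 3) };
    /// assert_eq!(q, 3);
    /// ```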
    pub fn unchecked_sdiv<T>(x: T, y: T) -> T;
    /// Performs an unchecked unsigned division, which results in undefined behavior
    /// in cases where y == 0
    pub fn unchecked_udiv<T>(x: T, y: T) -> T;

    /// Returns the remainder of an unchecked unsigned division, which results in
    /// undefined behavior in cases where y == 0
    pub fn unchecked_urem<T>(x: T, y: T) -> T;
    /// Returns the remainder of an unchecked signed division, which results in
    /// undefined behavior in cases where y == 0, or x == int::MIN and y == -1
    pub fn unchecked_srem<T>(x: T, y: T) -> T;
}