// C's `int` for the `memset` signature: 16-bit targets use a 16-bit `int`,
// everything else uses 32 bits. The cfg attributes were present but the
// aliases themselves had been truncated away.
#[cfg(target_pointer_width = "16")]
type c_int = i16;
#[cfg(not(target_pointer_width = "16"))]
type c_int = i32;
use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
use core::mem;
use core::ops::{BitOr, Shl};
/// libc-compatible `memcpy`: copies `n` bytes from `src` to `dest`,
/// returning `dest` (the C convention).
///
/// # Safety
/// `src` must be valid for `n` reads, `dest` for `n` writes, and the two
/// regions must not overlap (use `memmove` for overlapping copies).
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    let mut i = 0;
    while i < n {
        *dest.offset(i as isize) = *src.offset(i as isize);
        i += 1;
    }
    dest
}
/// libc-compatible `memmove`: copies `n` bytes from `src` to `dest`,
/// handling overlap correctly, and returns `dest`.
///
/// When `src < dest` the regions may overlap such that a forward copy would
/// clobber not-yet-read source bytes, so we copy backwards; otherwise a
/// forward copy is safe.
///
/// # Safety
/// `src` must be valid for `n` reads and `dest` for `n` writes.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    if src < dest as *const u8 {
        // copy from end
        let mut i = n;
        while i != 0 {
            i -= 1;
            *dest.offset(i as isize) = *src.offset(i as isize);
        }
    } else {
        // copy from beginning
        let mut i = 0;
        while i < n {
            *dest.offset(i as isize) = *src.offset(i as isize);
            i += 1;
        }
    }
    dest
}
42 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
43 pub unsafe extern "C" fn memset(s
: *mut u8, c
: c_int
, n
: usize) -> *mut u8 {
46 *s
.offset(i
as isize) = c
as u8;
/// libc-compatible `memcmp`: lexicographically compares the first `n` bytes
/// of `s1` and `s2` as unsigned bytes. Returns a negative value, zero, or a
/// positive value when `s1` is respectively less than, equal to, or greater
/// than `s2`.
///
/// # Safety
/// `s1` and `s2` must each be valid for `n` reads.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    let mut i = 0;
    while i < n {
        let a = *s1.offset(i as isize);
        let b = *s2.offset(i as isize);
        if a != b {
            return a as i32 - b as i32;
        }
        i += 1;
    }
    0
}
66 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
67 pub unsafe extern "C" fn bcmp(s1
: *const u8, s2
: *const u8, n
: usize) -> i32 {
71 // `bytes` must be a multiple of `mem::size_of::<T>()`
72 fn memcpy_element_unordered_atomic
<T
: Copy
>(dest
: *mut T
, src
: *const T
, bytes
: usize) {
74 let n
= exact_div(bytes
, mem
::size_of
::<T
>());
77 atomic_store_unordered(dest
.add(i
), atomic_load_unordered(src
.add(i
)));
83 // `bytes` must be a multiple of `mem::size_of::<T>()`
84 fn memmove_element_unordered_atomic
<T
: Copy
>(dest
: *mut T
, src
: *const T
, bytes
: usize) {
86 let n
= exact_div(bytes
, mem
::size_of
::<T
>());
87 if src
< dest
as *const T
{
92 atomic_store_unordered(dest
.add(i
), atomic_load_unordered(src
.add(i
)));
95 // copy from beginning
98 atomic_store_unordered(dest
.add(i
), atomic_load_unordered(src
.add(i
)));
105 // `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()`
106 fn memset_element_unordered_atomic
<T
>(s
: *mut T
, c
: u8, bytes
: usize)
108 T
: Copy
+ From
<u8> + Shl
<u32, Output
= T
> + BitOr
<T
, Output
= T
>,
111 let n
= exact_div(bytes
, mem
::size_of
::<T
>());
113 // Construct a value of type `T` consisting of repeated `c`
114 // bytes, to let us ensure we write each `T` atomically.
115 let mut x
= T
::from(c
);
117 while i
< mem
::size_of
::<T
>() {
118 x
= x
<< 8 | T
::from(c
);
125 atomic_store_unordered(s
.add(i
), x
);
132 #[cfg(target_has_atomic_load_store = "8")]
133 pub extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest
: *mut u8, src
: *const u8, bytes
: usize) -> () {
134 memcpy_element_unordered_atomic(dest
, src
, bytes
);
136 #[cfg(target_has_atomic_load_store = "16")]
137 pub extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest
: *mut u16, src
: *const u16, bytes
: usize) -> () {
138 memcpy_element_unordered_atomic(dest
, src
, bytes
);
140 #[cfg(target_has_atomic_load_store = "32")]
141 pub extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest
: *mut u32, src
: *const u32, bytes
: usize) -> () {
142 memcpy_element_unordered_atomic(dest
, src
, bytes
);
144 #[cfg(target_has_atomic_load_store = "64")]
145 pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest
: *mut u64, src
: *const u64, bytes
: usize) -> () {
146 memcpy_element_unordered_atomic(dest
, src
, bytes
);
148 #[cfg(target_has_atomic_load_store = "128")]
149 pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest
: *mut u128
, src
: *const u128
, bytes
: usize) -> () {
150 memcpy_element_unordered_atomic(dest
, src
, bytes
);
153 #[cfg(target_has_atomic_load_store = "8")]
154 pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest
: *mut u8, src
: *const u8, bytes
: usize) -> () {
155 memmove_element_unordered_atomic(dest
, src
, bytes
);
157 #[cfg(target_has_atomic_load_store = "16")]
158 pub extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest
: *mut u16, src
: *const u16, bytes
: usize) -> () {
159 memmove_element_unordered_atomic(dest
, src
, bytes
);
161 #[cfg(target_has_atomic_load_store = "32")]
162 pub extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest
: *mut u32, src
: *const u32, bytes
: usize) -> () {
163 memmove_element_unordered_atomic(dest
, src
, bytes
);
165 #[cfg(target_has_atomic_load_store = "64")]
166 pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest
: *mut u64, src
: *const u64, bytes
: usize) -> () {
167 memmove_element_unordered_atomic(dest
, src
, bytes
);
169 #[cfg(target_has_atomic_load_store = "128")]
170 pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest
: *mut u128
, src
: *const u128
, bytes
: usize) -> () {
171 memmove_element_unordered_atomic(dest
, src
, bytes
);
174 #[cfg(target_has_atomic_load_store = "8")]
175 pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s
: *mut u8, c
: u8, bytes
: usize) -> () {
176 memset_element_unordered_atomic(s
, c
, bytes
);
178 #[cfg(target_has_atomic_load_store = "16")]
179 pub extern "C" fn __llvm_memset_element_unordered_atomic_2(s
: *mut u16, c
: u8, bytes
: usize) -> () {
180 memset_element_unordered_atomic(s
, c
, bytes
);
182 #[cfg(target_has_atomic_load_store = "32")]
183 pub extern "C" fn __llvm_memset_element_unordered_atomic_4(s
: *mut u32, c
: u8, bytes
: usize) -> () {
184 memset_element_unordered_atomic(s
, c
, bytes
);
186 #[cfg(target_has_atomic_load_store = "64")]
187 pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s
: *mut u64, c
: u8, bytes
: usize) -> () {
188 memset_element_unordered_atomic(s
, c
, bytes
);
190 #[cfg(target_has_atomic_load_store = "128")]
191 pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s
: *mut u128
, c
: u8, bytes
: usize) -> () {
192 memset_element_unordered_atomic(s
, c
, bytes
);