]> git.proxmox.com Git - rustc.git/blob - vendor/compiler_builtins/src/mem.rs
New upstream version 1.41.1+dfsg1
[rustc.git] / vendor / compiler_builtins / src / mem.rs
// Minimal stand-in for libc's `c_int`, chosen by target pointer width:
// C's `int` is 16 bits on the 16-bit targets this crate supports and
// 32 bits everywhere else. `#[allow(warnings)]` silences the
// non-camel-case type name lint (the C-style name is kept on purpose).
#[allow(warnings)]
#[cfg(target_pointer_width = "16")]
type c_int = i16;
#[allow(warnings)]
#[cfg(not(target_pointer_width = "16"))]
type c_int = i32;
7
8 use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
9 use core::mem;
10 use core::ops::{BitOr, Shl};
11
/// C `memcpy`: byte-by-byte forward copy of `n` bytes from `src` to `dest`.
///
/// Returns `dest`, per the C contract. The regions must not overlap
/// (overlapping copies are `memmove`'s job); both pointers must be valid
/// for `n` bytes.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    let mut i = 0;
    while i < n {
        // `add` takes the offset as `usize` directly, avoiding the lossy
        // `i as isize` cast that `offset` required.
        *dest.add(i) = *src.add(i);
        i += 1;
    }
    dest
}
21
/// C `memmove`: copy `n` bytes from `src` to `dest`, correct even when the
/// regions overlap.
///
/// Returns `dest`. Both pointers must be valid for `n` bytes.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    if src < dest as *const u8 {
        // `dest` starts inside (or past) the source range: copy backwards so
        // each source byte is read before it can be overwritten.
        let mut i = n;
        while i != 0 {
            i -= 1;
            // `add(i)` avoids the lossy `i as isize` cast `offset` needed.
            *dest.add(i) = *src.add(i);
        }
    } else {
        // `dest` is at or before `src`: a plain forward copy is safe.
        let mut i = 0;
        while i < n {
            *dest.add(i) = *src.add(i);
            i += 1;
        }
    }
    dest
}
41
42 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
43 pub unsafe extern "C" fn memset(s: *mut u8, c: c_int, n: usize) -> *mut u8 {
44 let mut i = 0;
45 while i < n {
46 *s.offset(i as isize) = c as u8;
47 i += 1;
48 }
49 s
50 }
51
/// C `memcmp`: lexicographically compare `n` bytes at `s1` and `s2`.
///
/// Returns a negative value, zero, or a positive value when `s1` compares
/// respectively less than, equal to, or greater than `s2`; bytes compare
/// as unsigned, per the C contract.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    let mut i = 0;
    while i < n {
        // `add(i)` avoids the lossy `i as isize` cast `offset` needed.
        let a = *s1.add(i);
        let b = *s2.add(i);
        if a != b {
            // Difference of two u8s widened to i32 cannot overflow.
            return a as i32 - b as i32;
        }
        i += 1;
    }
    0
}
65
// `bcmp` only needs to distinguish equal (0) from not-equal (nonzero), so
// delegating to `memcmp` — whose signed result carries strictly more
// information — is a valid implementation.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    memcmp(s1, s2, n)
}
70
71 // `bytes` must be a multiple of `mem::size_of::<T>()`
72 fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
73 unsafe {
74 let n = exact_div(bytes, mem::size_of::<T>());
75 let mut i = 0;
76 while i < n {
77 atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
78 i += 1;
79 }
80 }
81 }
82
83 // `bytes` must be a multiple of `mem::size_of::<T>()`
84 fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
85 unsafe {
86 let n = exact_div(bytes, mem::size_of::<T>());
87 if src < dest as *const T {
88 // copy from end
89 let mut i = n;
90 while i != 0 {
91 i -= 1;
92 atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
93 }
94 } else {
95 // copy from beginning
96 let mut i = 0;
97 while i < n {
98 atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
99 i += 1;
100 }
101 }
102 }
103 }
104
105 // `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()`
106 fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
107 where
108 T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
109 {
110 unsafe {
111 let n = exact_div(bytes, mem::size_of::<T>());
112
113 // Construct a value of type `T` consisting of repeated `c`
114 // bytes, to let us ensure we write each `T` atomically.
115 let mut x = T::from(c);
116 let mut i = 1;
117 while i < mem::size_of::<T>() {
118 x = x << 8 | T::from(c);
119 i += 1;
120 }
121
122 // Write it to `s`
123 let mut i = 0;
124 while i < n {
125 atomic_store_unordered(s.add(i), x);
126 i += 1;
127 }
128 }
129 }
130
// Public entry points for LLVM's `*_element_unordered_atomic` builtins,
// emitted through this crate's `intrinsics!` macro (defined elsewhere in
// the crate). The `_N` suffix on each symbol is the element size in bytes;
// each definition is gated on `target_has_atomic_load_store` so it is only
// built when the target supports atomic load/store at that width. All of
// them simply dispatch to the generic helpers above at the matching
// element type.
intrinsics! {
    // memcpy variants: one per supported element width.
    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    // memmove variants: overlap-safe counterpart of the above.
    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    // memset variants: the fill byte `c` is broadcast to the element width
    // by the generic helper.
    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
}