use core::intrinsics;
use core::mem;

// Kernel-provided user-mode helper functions:
// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0usize as *const ());
    f(oldval, newval, ptr) == 0
}
unsafe fn __kuser_memory_barrier() {
    let f: extern "C" fn() = mem::transmute(0xffff0fa0usize as *const ());
    f();
}
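
// Per the kernel document linked above, these helpers live at fixed addresses
// in the vector page: __kuser_cmpxchg at 0xffff0fc0 atomically stores newval
// to *ptr only if *ptr equals oldval, returns 0 on success, and includes the
// memory barriers needed on SMP; __kuser_memory_barrier at 0xffff0fa0 is a
// full barrier. Both assume a kernel that provides helper version >= 3.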

// Word-align a pointer
fn align_ptr<T>(ptr: *mut T) -> *mut u32 {
    // This gives us a mask of 0 when T == u32 since the pointer is already
    // supposed to be aligned, which avoids any masking in that case.
    let ptr_mask = 3 & (4 - mem::size_of::<T>());
    (ptr as usize & !ptr_mask) as *mut u32
}
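
// For example, a *mut u8 whose address ends in 0b11 is masked down to the
// start of its containing word (ptr_mask = 3 & (4 - 1) = 3), a *mut u16 only
// needs bit 1 cleared (ptr_mask = 2, bit 0 is already zero by alignment), and
// a *mut u32 passes through unchanged (ptr_mask = 3 & 0 = 0).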

// Calculate the shift and mask of a value inside an aligned word
fn get_shift_mask<T>(ptr: *mut T) -> (u32, u32) {
    // Mask to get the low byte/halfword/word
    let mask = match mem::size_of::<T>() {
        1 => 0xff,
        2 => 0xffff,
        4 => 0xffffffff,
        _ => unreachable!(),
    };

    // If we are on big-endian then we need to adjust the shift accordingly
    let endian_adjust = if cfg!(target_endian = "little") {
        0
    } else {
        4 - mem::size_of::<T>() as u32
    };

    // Shift to get the desired element in the word
    let ptr_mask = 3 & (4 - mem::size_of::<T>());
    let shift = ((ptr as usize & ptr_mask) as u32 ^ endian_adjust) * 8;

    (shift, mask)
}
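
// For example, a *mut u16 at byte offset 2 within its word gets mask = 0xffff
// and, on a little-endian target, shift = 16: the halfword occupies the upper
// two bytes of the containing word. On big-endian the XOR with endian_adjust
// flips the offset (shift = 0 here), since byte 0 is then the most
// significant byte of the word.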

// Extract a value from an aligned word
fn extract_aligned(aligned: u32, shift: u32, mask: u32) -> u32 {
    (aligned >> shift) & mask
}

// Insert a value into an aligned word
fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
    (aligned & !(mask << shift)) | ((val & mask) << shift)
}
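
// For example, with aligned = 0xDDCC_BBAA, shift = 8 and mask = 0xff:
// extract_aligned yields 0xBB, and insert_aligned(0xDDCC_BBAA, 0xEE, 8, 0xff)
// yields 0xDDCC_EEAA.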

// Generic atomic read-modify-write operation
unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
    let aligned_ptr = align_ptr(ptr);
    let (shift, mask) = get_shift_mask(ptr);

    loop {
        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        let newval = f(curval);
        let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
        if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
            return curval;
        }
    }
}
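
// The unordered load above is only a snapshot: correctness comes from
// __kuser_cmpxchg validating the entire containing word, so a concurrent
// change to any byte of that word (including a neighboring value that shares
// it) makes the compare-exchange fail and the loop take another iteration.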

// Generic atomic compare-exchange operation
unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
    let aligned_ptr = align_ptr(ptr);
    let (shift, mask) = get_shift_mask(ptr);

    loop {
        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        if curval != oldval {
            return curval;
        }
        let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
        if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
            return oldval;
        }
    }
}
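
// As in atomic_rmw, a torn word (another byte changed between the load and
// the kernel cmpxchg) only causes a retry; a genuine mismatch on the target
// value itself returns early without writing anything.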

macro_rules! atomic_rmw {
    ($name:ident, $ty:ty, $op:expr) => {
        intrinsics! {
            pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
            }
        }
    };
}
macro_rules! atomic_cmpxchg {
    ($name:ident, $ty:ty) => {
        intrinsics! {
            pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
                atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
            }
        }
    };
}
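
// For illustration, `atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8|
// a.wrapping_add(b))` expands to roughly the following (modulo the linkage
// and ABI attributes that the crate's `intrinsics!` macro also applies):
//
//     pub unsafe extern "C" fn __sync_fetch_and_add_1(ptr: *mut u8, val: u8) -> u8 {
//         atomic_rmw(ptr, |x| (x as u8).wrapping_add(val) as u32) as u8
//     }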

atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b));

atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b));

atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
atomic_rmw!(__sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);

atomic_rmw!(__sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
atomic_rmw!(__sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
atomic_rmw!(__sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);

atomic_rmw!(__sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
atomic_rmw!(__sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
atomic_rmw!(__sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);

atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));

atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b });

atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b });

atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });

atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });

// test_and_set is an atomic exchange: the closure ignores the old value.
atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
atomic_rmw!(__sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);

atomic_cmpxchg!(__sync_val_compare_and_swap_1, u8);
atomic_cmpxchg!(__sync_val_compare_and_swap_2, u16);
atomic_cmpxchg!(__sync_val_compare_and_swap_4, u32);

intrinsics! {
    pub unsafe extern "C" fn __sync_synchronize() {
        __kuser_memory_barrier();
    }
}
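
// An illustrative sanity-check sketch for the word-manipulation helpers
// above (assumes the crate is compiled with a test harness; the functions
// under test are pure bit operations, so no atomics are exercised):
#[cfg(test)]
mod tests {
    use super::{extract_aligned, insert_aligned};

    #[test]
    fn extract_insert_roundtrip() {
        let word = 0xDDCC_BBAAu32;
        // Pull the byte at bits 8..16 out of the word...
        assert_eq!(extract_aligned(word, 8, 0xff), 0xBB);
        // ...and splice a replacement byte back into the same slot.
        assert_eq!(insert_aligned(word, 0xEE, 8, 0xff), 0xDDCC_EEAA);
    }
}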