]>
Commit | Line | Data |
---|---|---|
abe05a73 XL |
1 | use core::intrinsics; |
2 | use core::mem; | |
3 | ||
// Kernel-provided user-mode helper functions:
// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt

/// Calls the kernel's `__kuser_cmpxchg` user helper, which lives at the
/// fixed address `0xffff0fc0` on ARM Linux.
///
/// Returns `true` when `*ptr` matched `oldval` and was replaced with
/// `newval` (the helper signals success with a zero return value).
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
    // SAFETY(review): relies on the kernel mapping the helper at this
    // well-known address, per kernel_user_helpers.txt.
    let helper: extern "C" fn(u32, u32, *mut u32) -> u32 =
        mem::transmute(0xffff0fc0usize as *const ());
    helper(oldval, newval, ptr) == 0
}
/// Calls the kernel's `__kuser_memory_barrier` user helper at its fixed
/// address `0xffff0fa0`, issuing a full memory barrier.
unsafe fn __kuser_memory_barrier() {
    // SAFETY(review): relies on the kernel mapping the helper at this
    // well-known address, per kernel_user_helpers.txt.
    let barrier: extern "C" fn() = mem::transmute(0xffff0fa0usize as *const ());
    barrier();
}
14 | ||
/// Word-aligns `ptr` by clearing its low address bits, yielding the
/// `u32` word that contains the pointed-to element.
fn align_ptr<T>(ptr: *mut T) -> *mut u32 {
    // When T == u32 this mask is 0 (the pointer is already supposed to
    // be aligned), so no bits are cleared in that case.
    let low_bits = 3 & (4 - mem::size_of::<T>());
    let word_addr = ptr as usize & !low_bits;
    word_addr as *mut u32
}
22 | ||
/// Computes the bit shift and mask that locate a `T`-sized element
/// inside the aligned `u32` word that contains it.
fn get_shift_mask<T>(ptr: *mut T) -> (u32, u32) {
    let size = mem::size_of::<T>();

    // Mask covering the low byte/halfword/word.
    let mask: u32 = match size {
        1 => 0xff,
        2 => 0xffff,
        4 => 0xffffffff,
        _ => unreachable!(),
    };

    // Big-endian targets keep the element at the opposite end of the
    // word, so the byte offset must be mirrored.
    let endian_adjust = if cfg!(target_endian = "little") {
        0
    } else {
        4 - size as u32
    };

    // Byte offset of the element within its aligned word, mirrored for
    // endianness, then converted to a bit shift.
    let offset = (ptr as usize & (3 & (4 - size))) as u32;
    let shift = (offset ^ endian_adjust) * 8;

    (shift, mask)
}
46 | ||
/// Extracts the element selected by `shift`/`mask` from an aligned word.
fn extract_aligned(aligned: u32, shift: u32, mask: u32) -> u32 {
    let shifted = aligned >> shift;
    shifted & mask
}
51 | ||
/// Inserts `val` at the position selected by `shift`/`mask` inside an
/// aligned word, leaving the other bits of the word untouched.
fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
    let cleared = aligned & !(mask << shift);
    cleared | ((val & mask) << shift)
}
56 | ||
57 | // Generic atomic read-modify-write operation | |
58 | unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 { | |
59 | let aligned_ptr = align_ptr(ptr); | |
60 | let (shift, mask) = get_shift_mask(ptr); | |
61 | ||
62 | loop { | |
63 | let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr); | |
64 | let curval = extract_aligned(curval_aligned, shift, mask); | |
65 | let newval = f(curval); | |
66 | let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask); | |
67 | if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) { | |
68 | return curval; | |
69 | } | |
70 | } | |
71 | } | |
72 | ||
73 | // Generic atomic compare-exchange operation | |
ff7c6d11 | 74 | unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 { |
abe05a73 XL |
75 | let aligned_ptr = align_ptr(ptr); |
76 | let (shift, mask) = get_shift_mask(ptr); | |
77 | ||
78 | loop { | |
79 | let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr); | |
80 | let curval = extract_aligned(curval_aligned, shift, mask); | |
81 | if curval != oldval { | |
82 | return curval; | |
83 | } | |
84 | let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask); | |
85 | if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) { | |
86 | return oldval; | |
87 | } | |
88 | } | |
89 | } | |
90 | ||
// Emits a public `extern "C"` intrinsic `$name` that atomically applies
// the binary operation `$op` to the `$ty`-sized value at `ptr` and `val`,
// returning the previous value. The operation itself is evaluated on
// `u32` after widening (the underlying `atomic_rmw` works on whole words).
// NOTE(review): `intrinsics!` is a project-local macro — presumably it
// adds the usual compiler-builtins attributes; confirm at its definition.
macro_rules! atomic_rmw {
    ($name:ident, $ty:ty, $op:expr) => {
        intrinsics! {
            pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
            }
        }
    };
}
// Emits a public `extern "C"` compare-and-swap intrinsic `$name` for the
// element type `$ty`, delegating to the generic `atomic_cmpxchg` after
// widening both operands to `u32` and narrowing the result back.
macro_rules! atomic_cmpxchg {
    ($name:ident, $ty:ty) => {
        intrinsics! {
            pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
                atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
            }
        }
    };
}
109 | ||
110 | atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b)); | |
48663c56 XL |
111 | atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a |
112 | .wrapping_add(b)); | |
113 | atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a | |
114 | .wrapping_add(b)); | |
abe05a73 XL |
115 | |
116 | atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b)); | |
48663c56 XL |
117 | atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a |
118 | .wrapping_sub(b)); | |
119 | atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a | |
120 | .wrapping_sub(b)); | |
abe05a73 XL |
121 | |
122 | atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b); | |
123 | atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b); | |
124 | atomic_rmw!(__sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b); | |
125 | ||
126 | atomic_rmw!(__sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b); | |
127 | atomic_rmw!(__sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b); | |
128 | atomic_rmw!(__sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b); | |
129 | ||
130 | atomic_rmw!(__sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b); | |
131 | atomic_rmw!(__sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b); | |
132 | atomic_rmw!(__sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b); | |
133 | ||
b7449926 XL |
134 | atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b)); |
135 | atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b)); | |
136 | atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b)); | |
abe05a73 | 137 | |
48663c56 XL |
138 | atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { |
139 | a | |
140 | } else { | |
141 | b | |
142 | }); | |
143 | atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { | |
144 | a | |
145 | } else { | |
146 | b | |
147 | }); | |
148 | atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { | |
149 | a | |
150 | } else { | |
151 | b | |
152 | }); | |
153 | ||
154 | atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { | |
155 | a | |
156 | } else { | |
157 | b | |
158 | }); | |
159 | atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { | |
160 | a | |
161 | } else { | |
162 | b | |
163 | }); | |
164 | atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { | |
165 | a | |
166 | } else { | |
167 | b | |
168 | }); | |
169 | ||
170 | atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { | |
171 | a | |
172 | } else { | |
173 | b | |
174 | }); | |
175 | atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { | |
176 | a | |
177 | } else { | |
178 | b | |
179 | }); | |
180 | atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { | |
181 | a | |
182 | } else { | |
183 | b | |
184 | }); | |
185 | ||
186 | atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { | |
187 | a | |
188 | } else { | |
189 | b | |
190 | }); | |
191 | atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { | |
192 | a | |
193 | } else { | |
194 | b | |
195 | }); | |
196 | atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { | |
197 | a | |
198 | } else { | |
199 | b | |
200 | }); | |
abe05a73 XL |
201 | |
202 | atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b); | |
203 | atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b); | |
204 | atomic_rmw!(__sync_lock_test_and_set_4, u32, |_: u32, b: u32| b); | |
205 | ||
206 | atomic_cmpxchg!(__sync_val_compare_and_swap_1, u8); | |
207 | atomic_cmpxchg!(__sync_val_compare_and_swap_2, u16); | |
208 | atomic_cmpxchg!(__sync_val_compare_and_swap_4, u32); | |
209 | ||
intrinsics! {
    // Full memory barrier, implemented via the kernel-provided
    // `__kuser_memory_barrier` user helper.
    pub unsafe extern "C" fn __sync_synchronize() {
        __kuser_memory_barrier();
    }
}