]> git.proxmox.com Git - rustc.git/blame - vendor/portable-atomic/src/tests/helper.rs
New upstream version 1.75.0+dfsg1
[rustc.git] / vendor / portable-atomic / src / tests / helper.rs
CommitLineData
ed00b5ec
FG
1// SPDX-License-Identifier: Apache-2.0 OR MIT
2
781aab86
FG
3#![allow(unused_macros)]
4
5use core::sync::atomic::Ordering;
6
// Test cases shared by every atomic type: auto-trait coverage, memory
// layout, and consistency of the lock-freedom queries.
macro_rules! __test_atomic_common {
    ($atomic_type:ty, $value_type:ty) => {
        // The atomic must be shareable across threads and usable in
        // unwinding contexts.
        #[test]
        fn assert_auto_traits() {
            fn _assert<T: Send + Sync + Unpin + std::panic::UnwindSafe>() {}
            _assert::<$atomic_type>();
        }
        // The atomic must be aligned to its own size, and its size must
        // match the underlying value type.
        // https://github.com/rust-lang/rust/blob/1.70.0/library/core/tests/atomic.rs#L250
        #[test]
        fn alignment() {
            let size = core::mem::size_of::<$atomic_type>();
            assert_eq!(core::mem::align_of::<$atomic_type>(), size);
            assert_eq!(size, core::mem::size_of::<$value_type>());
        }
        #[test]
        fn is_lock_free() {
            // The const and runtime forms of is_always_lock_free must agree.
            const IS_ALWAYS_LOCK_FREE: bool = <$atomic_type>::is_always_lock_free();
            assert_eq!(<$atomic_type>::is_always_lock_free(), IS_ALWAYS_LOCK_FREE);
            let lock_free_now = <$atomic_type>::is_lock_free();
            // Always-lock-free implies lock-free at runtime.
            if IS_ALWAYS_LOCK_FREE {
                assert!(lock_free_now);
            }
        }
    };
}
// Additional shared test cases for the public atomic types.
macro_rules! __test_atomic_pub_common {
    ($atomic_type:ty, $value_type:ty) => {
        #[test]
        fn assert_ref_unwind_safe() {
            // When core's unwind-safety traits are unavailable and "std" is
            // disabled, RefUnwindSafe cannot be implemented; in every other
            // configuration it must be.
            #[cfg(all(portable_atomic_no_core_unwind_safe, not(feature = "std")))]
            static_assertions::assert_not_impl_all!($atomic_type: std::panic::RefUnwindSafe);
            #[cfg(not(all(portable_atomic_no_core_unwind_safe, not(feature = "std"))))]
            static_assertions::assert_impl_all!($atomic_type: std::panic::RefUnwindSafe);
        }
    };
}
43
// Generates load/store tests for an integer atomic type.
// The `single_thread` arm emits tests usable on targets without std threads;
// the two-argument arm additionally emits a multi-threaded stress test.
macro_rules! __test_atomic_int_load_store {
    ($atomic_type:ty, $int_type:ident, single_thread) => {
        __test_atomic_common!($atomic_type, $int_type);
        use crate::tests::helper::*;
        // get_mut / as_ptr / into_inner accessors.
        #[test]
        fn accessor() {
            let mut a = <$atomic_type>::new(10);
            assert_eq!(*a.get_mut(), 10);
            *a.get_mut() = 5;
            // as_ptr must point at the atomic itself (no indirection).
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert_eq!(a.into_inner(), 5);
        }
        // Load from a static that is never stored to.
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(10);
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), 10);
            }
        }
        // Round-trip load/store on both a static and a local, under each
        // (load, store) ordering pair.
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(10);
            // Ordering-handling checks (helpers defined elsewhere in this module).
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(10, order));
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                assert_eq!(VAR.load(load_order), 10);
                VAR.store(5, store_order);
                assert_eq!(VAR.load(load_order), 5);
                // Restore so the next iteration sees the initial value.
                VAR.store(10, store_order);
                let a = <$atomic_type>::new(1);
                assert_eq!(a.load(load_order), 1);
                a.store(2, store_order);
                assert_eq!(a.load(load_order), 2);
            }
        }
    };
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_int_load_store!($atomic_type, $int_type, single_thread);
        use crossbeam_utils::thread;
        use std::{collections::BTreeSet, vec, vec::Vec};
        // Concurrent loads and stores with randomized orderings; every value
        // observed by a loader must be one of the values that was stored.
        #[test]
        fn stress_load_store() {
            let (iterations, threads) = stress_test_config();
            let data1 = (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>();
            let set = data1.iter().copied().collect::<BTreeSet<_>>();
            let a = <$atomic_type>::new(data1[fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                for _ in 0..threads {
                    s.spawn(|_| {
                        let now = *now;
                        for i in 0..iterations {
                            a.store(data1[i], rand_store_ordering());
                        }
                        std::eprintln!("store end={:?}", now.elapsed());
                    });
                    s.spawn(|_| {
                        let now = *now;
                        let mut v = vec![0; iterations];
                        for i in 0..iterations {
                            v[i] = a.load(rand_load_ordering());
                        }
                        std::eprintln!("load end={:?}", now.elapsed());
                        for v in v {
                            // Loaded values must come from the stored data set.
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
    };
}
// Generates load/store tests for a float atomic type.
macro_rules! __test_atomic_float_load_store {
    ($atomic_type:ty, $float_type:ident, single_thread) => {
        __test_atomic_common!($atomic_type, $float_type);
        use crate::tests::helper::*;
        // get_mut / as_ptr / into_inner accessors.
        #[test]
        fn accessor() {
            let mut a = <$atomic_type>::new(10.0);
            assert_eq!(*a.get_mut(), 10.0);
            *a.get_mut() = 5.0;
            // as_ptr must point at the atomic itself (no indirection).
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert_eq!(a.into_inner(), 5.0);
        }
        // Load from a static that is never stored to.
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(10.0);
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), 10.0);
            }
        }
        // Round-trip load/store on both a static and a local, under each
        // (load, store) ordering pair.
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(10.0);
            // Ordering-handling checks (helpers defined elsewhere in this module).
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(10.0, order));
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                assert_eq!(VAR.load(load_order), 10.0);
                VAR.store(5.0, store_order);
                assert_eq!(VAR.load(load_order), 5.0);
                // Restore so the next iteration sees the initial value.
                VAR.store(10.0, store_order);
                let a = <$atomic_type>::new(1.0);
                assert_eq!(a.load(load_order), 1.0);
                a.store(2.0, store_order);
                assert_eq!(a.load(load_order), 2.0);
            }
        }
    };
    ($atomic_type:ty, $float_type:ident) => {
        __test_atomic_float_load_store!($atomic_type, $float_type, single_thread);
        // TODO: multi thread
    };
}
// Generates load/store tests for the boolean atomic type.
macro_rules! __test_atomic_bool_load_store {
    ($atomic_type:ty, single_thread) => {
        __test_atomic_common!($atomic_type, bool);
        use crate::tests::helper::*;
        // get_mut / as_ptr / into_inner accessors.
        #[test]
        fn accessor() {
            let mut a = <$atomic_type>::new(false);
            assert_eq!(*a.get_mut(), false);
            *a.get_mut() = true;
            // as_ptr must point at the atomic itself (no indirection).
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert_eq!(a.into_inner(), true);
        }
        // Load from a static that is never stored to.
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(false);
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), false);
            }
        }
        // Round-trip load/store on both a static and a local, under each
        // (load, store) ordering pair.
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(false);
            // Ordering-handling checks (helpers defined elsewhere in this module).
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(false, order));
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                assert_eq!(VAR.load(load_order), false);
                VAR.store(true, store_order);
                assert_eq!(VAR.load(load_order), true);
                // Restore so the next iteration sees the initial value.
                VAR.store(false, store_order);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.load(load_order), true);
                a.store(false, store_order);
                assert_eq!(a.load(load_order), false);
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_bool_load_store!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
// Generates load/store tests for the pointer atomic type.
macro_rules! __test_atomic_ptr_load_store {
    ($atomic_type:ty, single_thread) => {
        __test_atomic_common!($atomic_type, *mut u8);
        use crate::tests::helper::*;
        use std::ptr;
        // get_mut / as_ptr / into_inner accessors.
        #[test]
        fn accessor() {
            let mut v = 1;
            let mut a = <$atomic_type>::new(ptr::null_mut());
            assert!(a.get_mut().is_null());
            *a.get_mut() = &mut v;
            // as_ptr must point at the atomic itself (no indirection).
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert!(!a.into_inner().is_null());
        }
        // Load from a static that is never stored to.
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(ptr::null_mut());
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), ptr::null_mut());
            }
        }
        // Round-trip load/store of null and a real pointer, on both a static
        // and a local, under each (load, store) ordering pair.
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(ptr::null_mut());
            // Ordering-handling checks (helpers defined elsewhere in this module).
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(ptr::null_mut(), order));
            let mut v = 1_u8;
            let p = &mut v as *mut u8;
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                assert_eq!(VAR.load(load_order), ptr::null_mut());
                VAR.store(p, store_order);
                assert_eq!(VAR.load(load_order), p);
                // Restore so the next iteration sees the initial value.
                VAR.store(ptr::null_mut(), store_order);
                let a = <$atomic_type>::new(p);
                assert_eq!(a.load(load_order), p);
                a.store(ptr::null_mut(), store_order);
                assert_eq!(a.load(load_order), ptr::null_mut());
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_ptr_load_store!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
256
// Generates RMW-operation tests (swap, compare_exchange, fetch_*, and bit_*)
// for an integer atomic type, plus quickcheck property tests.
// The two-argument arm additionally emits multi-threaded stress tests.
macro_rules! __test_atomic_int {
    ($atomic_type:ty, $int_type:ident, single_thread) => {
        // NOTE(review): presumably imports the core numeric module so
        // `$int_type::MAX`/`MIN` paths resolve on older toolchains — confirm.
        use core::$int_type;
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.swap(5, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                assert_eq!(a.swap(10, order), 5);
                assert_eq!(a.swap(5, order), 10);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(5);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(5, 5, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(5);
                // Matching expected value: exchange succeeds and returns the old value.
                assert_eq!(a.compare_exchange(5, 10, success, failure), Ok(5));
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Mismatched expected value: exchange fails and returns the current value.
                assert_eq!(a.compare_exchange(6, 12, success, failure), Err(10));
                assert_eq!(a.load(Ordering::Relaxed), 10);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(4);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(4, 4, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(4);
                assert_eq!(a.compare_exchange_weak(6, 8, success, failure), Err(4));
                let mut old = a.load(Ordering::Relaxed);
                // Standard CAS retry loop: compare_exchange_weak may fail spuriously.
                loop {
                    let new = old * 2;
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), 8);
            }
        }
        #[test]
        fn fetch_add() {
            let a = <$atomic_type>::new(0);
            test_swap_ordering(|order| a.fetch_add(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0);
                assert_eq!(a.fetch_add(10, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Overflow must wrap, not trap.
                let a = <$atomic_type>::new($int_type::MAX);
                assert_eq!(a.fetch_add(1, order), $int_type::MAX);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MAX.wrapping_add(1));
            }
        }
        // Same as fetch_add, but for the variant that does not return the old value.
        #[test]
        fn add() {
            let a = <$atomic_type>::new(0);
            test_swap_ordering(|order| a.add(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0);
                a.add(10, order);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                let a = <$atomic_type>::new($int_type::MAX);
                a.add(1, order);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MAX.wrapping_add(1));
            }
        }
        #[test]
        fn fetch_sub() {
            let a = <$atomic_type>::new(20);
            test_swap_ordering(|order| a.fetch_sub(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20);
                assert_eq!(a.fetch_sub(10, order), 20);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Underflow must wrap, not trap.
                let a = <$atomic_type>::new($int_type::MIN);
                assert_eq!(a.fetch_sub(1, order), $int_type::MIN);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MIN.wrapping_sub(1));
            }
        }
        // Same as fetch_sub, but for the variant that does not return the old value.
        #[test]
        fn sub() {
            let a = <$atomic_type>::new(20);
            test_swap_ordering(|order| a.sub(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20);
                a.sub(10, order);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                let a = <$atomic_type>::new($int_type::MIN);
                a.sub(1, order);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MIN.wrapping_sub(1));
            }
        }
        #[test]
        fn fetch_and() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_and(0b101101, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_and(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b100001);
            }
        }
        #[test]
        fn and() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.and(0b101101, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.and(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b100001);
            }
        }
        #[test]
        fn fetch_nand() {
            let a = <$atomic_type>::new(0x13);
            test_swap_ordering(|order| a.fetch_nand(0x31, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0x13);
                assert_eq!(a.fetch_nand(0x31, order), 0x13);
                assert_eq!(a.load(Ordering::Relaxed), !(0x13 & 0x31));
            }
        }
        #[test]
        fn fetch_or() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_or(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_or(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b111111);
            }
        }
        #[test]
        fn or() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.or(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.or(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b111111);
            }
        }
        #[test]
        fn fetch_xor() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_xor(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_xor(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b011110);
            }
        }
        #[test]
        fn xor() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.xor(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.xor(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b011110);
            }
        }
        #[test]
        fn fetch_max() {
            let a = <$atomic_type>::new(23);
            test_swap_ordering(|order| a.fetch_max(23, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23);
                assert_eq!(a.fetch_max(22, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 23);
                assert_eq!(a.fetch_max(24, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 24);
                let a = <$atomic_type>::new(0);
                assert_eq!(a.fetch_max(1, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 1);
                assert_eq!(a.fetch_max(0, order), 1);
                assert_eq!(a.load(Ordering::Relaxed), 1);
                // (0 as $int_type).wrapping_sub(1) is -1 for signed types and
                // MAX for unsigned types, so this checks sign handling.
                let a = <$atomic_type>::new((0 as $int_type).wrapping_sub(1));
                assert_eq!(a.fetch_max(0, order), (0 as $int_type).wrapping_sub(1));
                assert_eq!(
                    a.load(Ordering::Relaxed),
                    core::cmp::max((0 as $int_type).wrapping_sub(1), 0)
                );
            }
        }
        #[test]
        fn fetch_min() {
            let a = <$atomic_type>::new(23);
            test_swap_ordering(|order| a.fetch_min(23, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23);
                assert_eq!(a.fetch_min(24, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 23);
                assert_eq!(a.fetch_min(22, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 22);
                let a = <$atomic_type>::new(1);
                assert_eq!(a.fetch_min(0, order), 1);
                assert_eq!(a.load(Ordering::Relaxed), 0);
                assert_eq!(a.fetch_min(1, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 0);
                // (0 as $int_type).wrapping_sub(1) is -1 for signed types and
                // MAX for unsigned types, so this checks sign handling.
                let a = <$atomic_type>::new((0 as $int_type).wrapping_sub(1));
                assert_eq!(a.fetch_min(0, order), (0 as $int_type).wrapping_sub(1));
                assert_eq!(
                    a.load(Ordering::Relaxed),
                    core::cmp::min((0 as $int_type).wrapping_sub(1), 0)
                );
            }
        }
        #[test]
        fn fetch_not() {
            let a = <$atomic_type>::new(1);
            test_swap_ordering(|order| a.fetch_not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(1);
                assert_eq!(a.fetch_not(order), 1);
                assert_eq!(a.load(Ordering::Relaxed), !1);
            }
        }
        // Same as fetch_not, but for the variant that does not return the old value.
        #[test]
        fn not() {
            let a = <$atomic_type>::new(1);
            test_swap_ordering(|order| a.not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(1);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), !1);
            }
        }
        #[test]
        fn fetch_neg() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.fetch_neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(5);
                // Negation is an involution: applying it twice restores the value.
                assert_eq!(a.fetch_neg(order), 5);
                assert_eq!(a.load(Ordering::Relaxed), (5 as $int_type).wrapping_neg());
                assert_eq!(a.fetch_neg(order), (5 as $int_type).wrapping_neg());
                assert_eq!(a.load(Ordering::Relaxed), 5);
                // MIN has no positive counterpart; negation must wrap.
                let a = <$atomic_type>::new(<$int_type>::MIN);
                assert_eq!(a.fetch_neg(order), <$int_type>::MIN);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN.wrapping_neg());
                assert_eq!(a.fetch_neg(order), <$int_type>::MIN.wrapping_neg());
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN);
            }
        }
        // Same as fetch_neg, but for the variant that does not return the old value.
        #[test]
        fn neg() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(5);
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), (5 as $int_type).wrapping_neg());
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), 5);
                let a = <$atomic_type>::new(<$int_type>::MIN);
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN.wrapping_neg());
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN);
            }
        }
        // bit_* return the previous state of the targeted bit.
        #[test]
        fn bit_set() {
            let a = <$atomic_type>::new(0b0001);
            test_swap_ordering(|order| assert!(a.bit_set(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0000);
                assert!(!a.bit_set(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
                assert!(a.bit_set(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
            }
        }
        #[test]
        fn bit_clear() {
            let a = <$atomic_type>::new(0b0000);
            test_swap_ordering(|order| assert!(!a.bit_clear(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0001);
                assert!(a.bit_clear(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
                assert!(!a.bit_clear(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
            }
        }
        #[test]
        fn bit_toggle() {
            let a = <$atomic_type>::new(0b0000);
            test_swap_ordering(|order| a.bit_toggle(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0000);
                assert!(!a.bit_toggle(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
                assert!(a.bit_toggle(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
            }
        }
        // Property tests over random inputs for each RMW operation, run
        // under every swap / compare-exchange ordering.
        ::quickcheck::quickcheck! {
            fn quickcheck_swap(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.swap(y, order), x);
                    assert_eq!(a.swap(x, order), y);
                }
                true
            }
            fn quickcheck_compare_exchange(x: $int_type, y: $int_type) -> bool {
                #[cfg(all(
                    target_arch = "arm",
                    not(any(target_feature = "v6", portable_atomic_target_feature = "v6")),
                ))]
                {
                    // TODO: LLVM bug:
                    // https://github.com/llvm/llvm-project/issues/61880
                    // https://github.com/taiki-e/portable-atomic/issues/2
                    if core::mem::size_of::<$int_type>() <= 2 {
                        return true;
                    }
                }
                // Pick a value guaranteed to differ from y for the failure case.
                let z = loop {
                    let z = fastrand::$int_type(..);
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                    assert_eq!(a.compare_exchange(z, x, success, failure).unwrap_err(), y);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
            fn quickcheck_fetch_add(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_add(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_add(y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_add(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_add(x));
                }
                true
            }
            fn quickcheck_add(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.add(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_add(y));
                    let a = <$atomic_type>::new(y);
                    a.add(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_add(x));
                }
                true
            }
            fn quickcheck_fetch_sub(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_sub(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_sub(y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_sub(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_sub(x));
                }
                true
            }
            fn quickcheck_sub(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.sub(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_sub(y));
                    let a = <$atomic_type>::new(y);
                    a.sub(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_sub(x));
                }
                true
            }
            fn quickcheck_fetch_and(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_and(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x & y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_and(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y & x);
                }
                true
            }
            fn quickcheck_and(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.and(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x & y);
                    let a = <$atomic_type>::new(y);
                    a.and(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y & x);
                }
                true
            }
            fn quickcheck_fetch_nand(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_nand(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), !(x & y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_nand(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), !(y & x));
                }
                true
            }
            fn quickcheck_fetch_or(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_or(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x | y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_or(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y | x);
                }
                true
            }
            fn quickcheck_or(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.or(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x | y);
                    let a = <$atomic_type>::new(y);
                    a.or(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y | x);
                }
                true
            }
            fn quickcheck_fetch_xor(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_xor(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_xor(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                }
                true
            }
            fn quickcheck_xor(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.xor(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                    let a = <$atomic_type>::new(y);
                    a.xor(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                }
                true
            }
            fn quickcheck_fetch_max(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_max(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(x, y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_max(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(y, x));
                }
                true
            }
            fn quickcheck_fetch_min(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_min(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(x, y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_min(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(y, x));
                }
                true
            }
            fn quickcheck_fetch_not(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_not(order), x);
                    assert_eq!(a.load(Ordering::Relaxed), !x);
                    assert_eq!(a.fetch_not(order), !x);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_not(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.not(order);
                    assert_eq!(a.load(Ordering::Relaxed), !x);
                    a.not(order);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_fetch_neg(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_neg(order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_neg());
                    assert_eq!(a.fetch_neg(order), x.wrapping_neg());
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_neg(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.neg(order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_neg());
                    a.neg(order);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_bit_set(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_set(bit, order);
                    // wrapping_shl because quickcheck may pick bit >= bit width.
                    let mask = (1 as $int_type).wrapping_shl(bit);
                    assert_eq!(a.load(Ordering::Relaxed), x | mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
            fn quickcheck_bit_clear(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_clear(bit, order);
                    let mask = (1 as $int_type).wrapping_shl(bit);
                    assert_eq!(a.load(Ordering::Relaxed), x & !mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
            fn quickcheck_bit_toggle(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_toggle(bit, order);
                    let mask = (1 as $int_type).wrapping_shl(bit);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
        }
    };
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_int!($atomic_type, $int_type, single_thread);

        // Concurrent stores, loads, and swaps with randomized orderings;
        // every observed value must come from the generated data sets.
        #[test]
        fn stress_swap() {
            let (iterations, threads) = stress_test_config();
            let data1 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let data2 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            // The set of every value any thread may ever write.
            let set = &data1
                .iter()
                .flat_map(|v| v.iter().copied())
                .chain(data2.iter().flat_map(|v| v.iter().copied()))
                .collect::<BTreeSet<_>>();
            let a = &<$atomic_type>::new(data2[0][fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                for thread in 0..threads {
                    if thread % 2 == 0 {
                        s.spawn(move |_| {
                            let now = *now;
                            for i in 0..iterations {
                                a.store(data1[thread][i], rand_store_ordering());
                            }
                            std::eprintln!("store end={:?}", now.elapsed());
                        });
                    } else {
                        s.spawn(|_| {
                            let now = *now;
                            let mut v = vec![0; iterations];
                            for i in 0..iterations {
                                v[i] = a.load(rand_load_ordering());
                            }
                            std::eprintln!("load end={:?}", now.elapsed());
                            for v in v {
                                assert!(set.contains(&v), "v={}", v);
                            }
                        });
                    }
                    s.spawn(move |_| {
                        let now = *now;
                        let mut v = vec![0; iterations];
                        for i in 0..iterations {
                            v[i] = a.swap(data2[thread][i], rand_swap_ordering());
                        }
                        std::eprintln!("swap end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
        // Concurrent stores, loads, and compare_exchanges with randomized
        // orderings; every observed value must come from the data sets.
        #[test]
        fn stress_compare_exchange() {
            let (iterations, threads) = stress_test_config();
            let data1 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let data2 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let set = &data1
                .iter()
                .flat_map(|v| v.iter().copied())
                .chain(data2.iter().flat_map(|v| v.iter().copied()))
                .collect::<BTreeSet<_>>();
            let a = &<$atomic_type>::new(data2[0][fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                for thread in 0..threads {
                    s.spawn(move |_| {
                        let now = *now;
                        for i in 0..iterations {
                            a.store(data1[thread][i], rand_store_ordering());
                        }
                        std::eprintln!("store end={:?}", now.elapsed());
                    });
                    s.spawn(|_| {
                        let now = *now;
                        let mut v = vec![data2[0][0]; iterations];
                        for i in 0..iterations {
                            v[i] = a.load(rand_load_ordering());
                        }
                        std::eprintln!("load end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                    s.spawn(move |_| {
                        let now = *now;
                        let mut v = vec![data2[0][0]; iterations];
                        for i in 0..iterations {
                            // Alternate between a random expected value (likely
                            // to fail) and the current value (likely to succeed).
                            let old = if i % 2 == 0 {
                                fastrand::$int_type(..)
                            } else {
                                a.load(Ordering::Relaxed)
                            };
                            let new = data2[thread][i];
                            let o = rand_compare_exchange_ordering();
                            match a.compare_exchange(old, new, o.0, o.1) {
                                Ok(r) => assert_eq!(old, r),
                                Err(r) => v[i] = r,
                            }
                        }
                        std::eprintln!("compare_exchange end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
    };
}
// Generates the test suite for an atomic float type (e.g. AtomicF32/AtomicF64).
// The `single_thread` arm emits tests that need no thread spawning; the
// two-argument arm currently just reuses it (multi-thread tests are TODO).
// Note: comments inside a macro body are stripped by the lexer, so they do
// not change the token stream the macro expands to.
macro_rules! __test_atomic_float {
    ($atomic_type:ty, $float_type:ident, single_thread) => {
        // Bring core::f32 / core::f64 into scope for associated consts
        // ($float_type::MAX / MIN) used below.
        use core::$float_type;
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(5.0);
            // Verify swap accepts every valid RMW ordering.
            test_swap_ordering(|order| a.swap(5.0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                assert_eq!(a.swap(10.0, order), 5.0);
                assert_eq!(a.swap(5.0, order), 10.0);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(5.0);
            // Verify compare_exchange accepts every valid (success, failure) pair.
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(5.0, 5.0, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(5.0);
                assert_eq!(a.compare_exchange(5.0, 10.0, success, failure), Ok(5.0));
                assert_eq!(a.load(Ordering::Relaxed), 10.0);
                // Mismatched `current`: must fail and leave the value unchanged.
                assert_eq!(a.compare_exchange(6.0, 12.0, success, failure), Err(10.0));
                assert_eq!(a.load(Ordering::Relaxed), 10.0);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(4.0);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(4.0, 4.0, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(4.0);
                assert_eq!(a.compare_exchange_weak(6.0, 8.0, success, failure), Err(4.0));
                let mut old = a.load(Ordering::Relaxed);
                loop {
                    let new = old * 2.0;
                    // compare_exchange_weak may fail spuriously even when the
                    // comparison succeeds, so it is exercised in a retry loop.
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), 8.0);
            }
        }
        #[test]
        fn fetch_add() {
            let a = <$atomic_type>::new(0.0);
            test_swap_ordering(|order| a.fetch_add(0.0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0.0);
                assert_eq!(a.fetch_add(10.0, order), 0.0);
                assert_eq!(a.load(Ordering::Relaxed), 10.0);
                // MAX + 1.0 rounds back to MAX in IEEE 754, so both sides of
                // the final assertion are the same value (no overflow to inf).
                let a = <$atomic_type>::new($float_type::MAX);
                assert_eq!(a.fetch_add(1.0, order), $float_type::MAX);
                assert_eq!(a.load(Ordering::Relaxed), $float_type::MAX + 1.0);
            }
        }
        #[test]
        fn fetch_sub() {
            let a = <$atomic_type>::new(20.0);
            test_swap_ordering(|order| a.fetch_sub(0.0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20.0);
                assert_eq!(a.fetch_sub(10.0, order), 20.0);
                assert_eq!(a.load(Ordering::Relaxed), 10.0);
                // Mirror of the fetch_add MAX case, at the negative extreme.
                let a = <$atomic_type>::new($float_type::MIN);
                assert_eq!(a.fetch_sub(1.0, order), $float_type::MIN);
                assert_eq!(a.load(Ordering::Relaxed), $float_type::MIN - 1.0);
            }
        }
        #[test]
        fn fetch_max() {
            let a = <$atomic_type>::new(23.0);
            test_swap_ordering(|order| a.fetch_max(23.0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23.0);
                assert_eq!(a.fetch_max(22.0, order), 23.0);
                assert_eq!(a.load(Ordering::Relaxed), 23.0);
                assert_eq!(a.fetch_max(24.0, order), 23.0);
                assert_eq!(a.load(Ordering::Relaxed), 24.0);
            }
        }
        #[test]
        fn fetch_min() {
            let a = <$atomic_type>::new(23.0);
            test_swap_ordering(|order| a.fetch_min(23.0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23.0);
                assert_eq!(a.fetch_min(24.0, order), 23.0);
                assert_eq!(a.load(Ordering::Relaxed), 23.0);
                assert_eq!(a.fetch_min(22.0, order), 23.0);
                assert_eq!(a.load(Ordering::Relaxed), 22.0);
            }
        }
        #[test]
        fn fetch_neg() {
            let a = <$atomic_type>::new(5.0);
            test_swap_ordering(|order| a.fetch_neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(5.0);
                assert_eq!(a.fetch_neg(order), 5.0);
                assert_eq!(a.load(Ordering::Relaxed), -5.0);
                // Negating twice must round-trip back to the original value.
                assert_eq!(a.fetch_neg(order), -5.0);
                assert_eq!(a.load(Ordering::Relaxed), 5.0);
            }
        }
        #[test]
        fn fetch_abs() {
            let a = <$atomic_type>::new(23.0);
            test_swap_ordering(|order| a.fetch_abs(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(-23.0);
                assert_eq!(a.fetch_abs(order), -23.0);
                assert_eq!(a.load(Ordering::Relaxed), 23.0);
                // abs of an already-positive value is a no-op.
                assert_eq!(a.fetch_abs(order), 23.0);
                assert_eq!(a.load(Ordering::Relaxed), 23.0);
            }
        }
        // Property-based variants over arbitrary float inputs (including NaN;
        // assert_float_op_eq! compares results bitwise where needed).
        ::quickcheck::quickcheck! {
            fn quickcheck_swap(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.swap(y, order), x);
                    assert_float_op_eq!(a.swap(x, order), y);
                }
                true
            }
            fn quickcheck_compare_exchange(x: $float_type, y: $float_type) -> bool {
                // Pick a random value distinct from `y` to drive the failure path.
                let z = loop {
                    let z = fastrand::$float_type();
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y);
                    assert_float_op_eq!(
                        a.compare_exchange(z, x, success, failure).unwrap_err(),
                        y,
                    );
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
            fn quickcheck_fetch_add(x: $float_type, y: $float_type) -> bool {
                if cfg!(all(not(debug_assertions), target_arch = "x86", not(target_feature = "sse2"))) {
                    // TODO: rustc bug:
                    // https://github.com/rust-lang/rust/issues/72327
                    // https://github.com/rust-lang/rust/issues/73288
                    return true;
                }
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_add(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x + y);
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_add(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y + x);
                }
                true
            }
            fn quickcheck_fetch_sub(x: $float_type, y: $float_type) -> bool {
                if cfg!(all(not(debug_assertions), target_arch = "x86", not(target_feature = "sse2"))) {
                    // TODO: rustc bug:
                    // https://github.com/rust-lang/rust/issues/72327
                    // https://github.com/rust-lang/rust/issues/73288
                    return true;
                }
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_sub(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x - y);
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_sub(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y - x);
                }
                true
            }
            fn quickcheck_fetch_max(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_max(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.max(y));
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_max(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y.max(x));
                }
                true
            }
            fn quickcheck_fetch_min(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_min(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.min(y));
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_min(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y.min(x));
                }
                true
            }
            fn quickcheck_fetch_neg(x: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_neg(order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), -x);
                    assert_float_op_eq!(a.fetch_neg(order), -x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_fetch_abs(x: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_abs(order), x);
                    assert_float_op_eq!(a.fetch_abs(order), x.abs());
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.abs());
                }
                true
            }
        }
    };
    ($atomic_type:ty, $float_type:ident) => {
        __test_atomic_float!($atomic_type, $float_type, single_thread);
        // TODO: multi thread
    };
}
// Generates the test suite for an AtomicBool-like type. Each RMW test
// enumerates the full truth table (both operand values against both stored
// values) under every valid ordering.
macro_rules! __test_atomic_bool {
    ($atomic_type:ty, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(true);
            // Verify swap accepts every valid RMW ordering.
            test_swap_ordering(|order| a.swap(true, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // `a` is reused across iterations; each pass ends with `a == true`.
                assert_eq!(a.swap(true, order), true);
                assert_eq!(a.swap(false, order), true);
                assert_eq!(a.swap(false, order), false);
                assert_eq!(a.swap(true, order), false);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(true);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(true, true, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.compare_exchange(true, false, success, failure), Ok(true));
                assert_eq!(a.load(Ordering::Relaxed), false);
                // Mismatched `current`: must fail and leave the value unchanged.
                assert_eq!(a.compare_exchange(true, true, success, failure), Err(false));
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(false);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(false, false, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(false);
                assert_eq!(a.compare_exchange_weak(true, true, success, failure), Err(false));
                let mut old = a.load(Ordering::Relaxed);
                let new = true;
                loop {
                    // compare_exchange_weak may fail spuriously, so retry.
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_and() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_and(true, order), true));
            // Full truth table for AND.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_and(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_and(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_and(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_and(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn and() {
            // `and` is the value-discarding variant of `fetch_and`.
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.and(true, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.and(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(true);
                a.and(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                a.and(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.and(true, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn fetch_or() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_or(false, order), true));
            // Full truth table for OR.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_or(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_or(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_or(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_or(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn or() {
            // `or` is the value-discarding variant of `fetch_or`.
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.or(false, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.or(false, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                a.or(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                a.or(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.or(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_xor() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_xor(false, order), true));
            // Full truth table for XOR.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_xor(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_xor(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_xor(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_xor(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn xor() {
            // `xor` is the value-discarding variant of `fetch_xor`.
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.xor(false, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.xor(false, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                a.xor(true, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.xor(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.xor(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        ::quickcheck::quickcheck! {
            fn quickcheck_compare_exchange(x: bool, y: bool) -> bool {
                // `z` is guaranteed != y, driving the failure path below.
                let z = !y;
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                    assert_eq!(a.compare_exchange(z, x, success, failure).unwrap_err(), y);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_bool!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
// Generates the test suite for an AtomicPtr-like type, using a null pointer
// and the address of a stack local as the two distinguishable values.
macro_rules! __test_atomic_ptr {
    ($atomic_type:ty, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(ptr::null_mut());
            // Verify swap accepts every valid RMW ordering.
            test_swap_ordering(|order| a.swap(ptr::null_mut(), order));
            let x = &mut 1;
            for &order in &test_helper::SWAP_ORDERINGS {
                assert_eq!(a.swap(x, order), ptr::null_mut());
                assert_eq!(a.swap(ptr::null_mut(), order), x as _);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(ptr::null_mut(), ptr::null_mut(), success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                let x = &mut 1;
                assert_eq!(
                    a.compare_exchange(ptr::null_mut(), x, success, failure),
                    Ok(ptr::null_mut()),
                );
                assert_eq!(a.load(Ordering::Relaxed), x as _);
                // Stored value is now non-null: comparing against null must fail.
                assert_eq!(
                    a.compare_exchange(ptr::null_mut(), ptr::null_mut(), success, failure),
                    Err(x as _),
                );
                assert_eq!(a.load(Ordering::Relaxed), x as _);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(ptr::null_mut(), ptr::null_mut(), success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                let x = &mut 1;
                assert_eq!(a.compare_exchange_weak(x, x, success, failure), Err(ptr::null_mut()));
                let mut old = a.load(Ordering::Relaxed);
                loop {
                    // compare_exchange_weak may fail spuriously, so retry.
                    match a.compare_exchange_weak(old, x, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), x as _);
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_ptr!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
1402
// Public-API tests shared by all atomic integer types: trait impls
// (Default/From/Debug) plus the from_ptr/as_ptr raw-pointer round trip.
// Fix: removed git-blame artifact lines (commit hashes "ed00b5ec",
// "781aab86", "FG") that were interleaved into the source by a faulty
// extraction and broke tokenization; the code itself is unchanged.
macro_rules! __test_atomic_int_load_store_pub {
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_pub_common!($atomic_type, $int_type);
        use std::{boxed::Box, mem};
        #[test]
        fn impls() {
            // Default and From must agree on the zero value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(0);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            // Debug output must be transparent to the contained value.
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));

            unsafe {
                // Align16 over-aligns the allocation so it satisfies the
                // alignment requirement of any atomic type under test.
                let ptr: *mut Align16<$int_type> = Box::into_raw(Box::new(Align16(0)));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    // SAFETY-relevant: the reference from from_ptr is dropped
                    // (scope ends) before the Box is reclaimed below.
                    let a = <$atomic_type>::from_ptr(ptr.cast::<$int_type>());
                    *a.as_ptr() = 1;
                }
                // The write through as_ptr must be visible via the raw pointer.
                assert_eq!((*ptr).0, 1);
                drop(Box::from_raw(ptr));
            }
        }
    };
}
// Public-API fetch_update tests for atomic integer types, including a
// property-based variant over arbitrary inputs.
macro_rules! __test_atomic_int_pub {
    ($atomic_type:ty, $int_type:ident) => {
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(7);
            // Verify fetch_update accepts every valid (set, fetch) ordering pair.
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(7);
                // Returning None aborts the update and yields Err(current).
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(7));
                // Successful updates return the previous value.
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1)), Ok(7));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1)), Ok(8));
                assert_eq!(a.load(Ordering::SeqCst), 9);
            }
        }
        ::quickcheck::quickcheck! {
            fn quickcheck_fetch_update(x: $int_type, y: $int_type) -> bool {
                // Pick a random value distinct from `y` so the closure below
                // can distinguish the stored value from `y`.
                let z = loop {
                    let z = fastrand::$int_type(..);
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(
                        a.fetch_update(success, failure, |_| Some(y))
                            .unwrap(),
                        x
                    );
                    assert_eq!(
                        a.fetch_update(success, failure, |_| Some(z))
                            .unwrap(),
                        y
                    );
                    assert_eq!(a.load(Ordering::Relaxed), z);
                    // Stored value is `z` (!= y), so the closure returns None
                    // and fetch_update must report Err(z) without changing it.
                    assert_eq!(
                        a.fetch_update(success, failure, |z| if z == y { Some(z) } else { None })
                            .unwrap_err(),
                        z
                    );
                    assert_eq!(a.load(Ordering::Relaxed), z);
                }
                true
            }
        }
    };
}
// Public-API tests for atomic float types: fetch_update plus trait impls and
// the from_ptr/as_ptr raw-pointer round trip.
// Fix: removed git-blame artifact lines (commit hashes "ed00b5ec",
// "781aab86", "FG") interleaved into the source by a faulty extraction;
// the code itself is unchanged.
macro_rules! __test_atomic_float_pub {
    ($atomic_type:ty, $float_type:ident) => {
        __test_atomic_pub_common!($atomic_type, $float_type);
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(7.0);
            // Verify fetch_update accepts every valid (set, fetch) ordering pair.
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(7.0);
                // Returning None aborts the update and yields Err(current).
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(7.0));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1.0)), Ok(7.0));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1.0)), Ok(8.0));
                assert_eq!(a.load(Ordering::SeqCst), 9.0);
            }
        }
        #[test]
        fn impls() {
            // Default and From must agree on the zero value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(0.0);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            // Debug output must be transparent to the contained value.
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));

            unsafe {
                // Align16 over-aligns the allocation so it satisfies the
                // alignment requirement of any atomic type under test.
                let ptr: *mut Align16<$float_type> = Box::into_raw(Box::new(Align16(0.0)));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    // The reference from from_ptr is dropped (scope ends)
                    // before the Box is reclaimed below.
                    let a = <$atomic_type>::from_ptr(ptr.cast::<$float_type>());
                    *a.as_ptr() = 1.0;
                }
                assert_eq!((*ptr).0, 1.0);
                drop(Box::from_raw(ptr));
            }
        }
    };
}
// Public-API tests for AtomicBool-like types: nand/not (with value-discarding
// variants), fetch_update, trait impls, and the from_ptr/as_ptr round trip.
// Fixes: (1) removed git-blame artifact lines ("ed00b5ec", "781aab86", "FG")
// interleaved by a faulty extraction; (2) the `not` test now drives
// test_swap_ordering with `a.not(order)` — the method under test — instead of
// `a.fetch_not(order)`, matching how the `and`/`or`/`xor` tests exercise their
// own value-discarding methods (behaviorally equivalent: both flip the value).
macro_rules! __test_atomic_bool_pub {
    ($atomic_type:ty) => {
        __test_atomic_pub_common!($atomic_type, bool);
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_nand() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_nand(false, order), true));
            // Full truth table for NAND.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_nand(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_nand(true, order), true);
                // Check both the integer view and the bool view of the result.
                assert_eq!(a.load(Ordering::Relaxed) as usize, 0);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_nand(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_nand(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_not() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.fetch_not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_not(order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_not(order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn not() {
            // `not` is the value-discarding variant of `fetch_not`.
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(false);
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(false);
                // Returning None aborts the update and yields Err(current).
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(false));
                // Two negations bring the value back to false.
                assert_eq!(a.fetch_update(success, failure, |x| Some(!x)), Ok(false));
                assert_eq!(a.fetch_update(success, failure, |x| Some(!x)), Ok(true));
                assert_eq!(a.load(Ordering::SeqCst), false);
            }
        }
        #[test]
        fn impls() {
            // Default and From must agree on the `false` value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(false);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            // Debug output must be transparent to the contained value.
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));

            unsafe {
                let ptr: *mut bool = Box::into_raw(Box::new(false));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    // The reference from from_ptr is dropped (scope ends)
                    // before the Box is reclaimed below.
                    let a = <$atomic_type>::from_ptr(ptr);
                    *a.as_ptr() = true;
                }
                assert_eq!((*ptr), true);
                drop(Box::from_raw(ptr));
            }
        }
    };
}
// Public-API tests for AtomicPtr-like types: fetch_update, trait impls,
// pointer arithmetic (fetch_ptr_add/sub, fetch_byte_add/sub), bitwise tag
// manipulation (fetch_or/and/xor, bit_set/bit_clear/bit_toggle), using the
// `sptr` strict-provenance polyfill (addr/map_addr).
// Fix: removed git-blame artifact lines (commit hashes "ed00b5ec",
// "781aab86", "FG") interleaved into the source by a faulty extraction;
// the code itself is unchanged.
macro_rules! __test_atomic_ptr_pub {
    ($atomic_type:ty) => {
        __test_atomic_pub_common!($atomic_type, *mut u8);
        use sptr::Strict;
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                // Returning None aborts the update and yields Err(current).
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(ptr::null_mut()));
                assert_eq!(
                    a.fetch_update(success, failure, |_| Some(&a as *const _ as *mut _)),
                    Ok(ptr::null_mut())
                );
                assert_eq!(a.load(Ordering::SeqCst), &a as *const _ as *mut _);
            }
        }
        #[test]
        fn impls() {
            // Default and From must agree on the null value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(ptr::null_mut());
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            // Debug and Pointer formatting must be transparent to the value.
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
            assert_eq!(std::format!("{:p}", a), std::format!("{:p}", a.load(Ordering::SeqCst)));

            unsafe {
                // Align16 over-aligns the allocation so it satisfies the
                // alignment requirement of any atomic type under test.
                let ptr: *mut Align16<*mut u8> = Box::into_raw(Box::new(Align16(ptr::null_mut())));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    // The reference from from_ptr is dropped (scope ends)
                    // before the Box is reclaimed below.
                    let a = <$atomic_type>::from_ptr(ptr.cast::<*mut u8>());
                    *a.as_ptr() = ptr::null_mut::<u8>().wrapping_add(1);
                }
                assert_eq!((*ptr).0, ptr::null_mut::<u8>().wrapping_add(1));
                drop(Box::from_raw(ptr));
            }
        }
        // https://github.com/rust-lang/rust/blob/1.70.0/library/core/tests/atomic.rs#L130-L213
        #[test]
        fn ptr_add_null() {
            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
            // fetch_ptr_add advances in units of size_of::<i64>() == 8 bytes.
            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst).addr(), 0);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 8);

            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst).addr(), 8);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 9);

            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst).addr(), 9);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 1);

            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst).addr(), 1);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0);
        }
        #[test]
        fn ptr_add_data() {
            let num = 0i64;
            let n = &num as *const i64 as *mut _;
            let atom = AtomicPtr::<i64>::new(n);
            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst), n);
            assert_eq!(atom.load(Ordering::SeqCst), n.wrapping_add(1));

            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst), n.wrapping_add(1));
            assert_eq!(atom.load(Ordering::SeqCst), n);
            // Helper producing a pointer `b` bytes past `n`.
            let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();

            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst), n);
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(1));

            assert_eq!(atom.fetch_byte_add(5, Ordering::SeqCst), bytes_from_n(1));
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(6));

            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst), bytes_from_n(6));
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(5));

            assert_eq!(atom.fetch_byte_sub(5, Ordering::SeqCst), bytes_from_n(5));
            assert_eq!(atom.load(Ordering::SeqCst), n);
        }
        #[test]
        fn ptr_bitops() {
            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst).addr(), 0);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0111);

            assert_eq!(atom.fetch_and(0b1101, Ordering::SeqCst).addr(), 0b0111);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0101);

            assert_eq!(atom.fetch_xor(0b1111, Ordering::SeqCst).addr(), 0b0101);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b1010);
        }
        #[test]
        fn ptr_bitops_tagging() {
            // The low 4 bits of a 16-byte-aligned pointer are free for tags.
            const MASK_TAG: usize = 0b1111;
            const MASK_PTR: usize = !MASK_TAG;

            #[repr(align(16))]
            struct Tagme(u128);

            let tagme = Tagme(1000);
            let ptr = &tagme as *const Tagme as *mut Tagme;
            let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);

            assert_eq!(ptr.addr() & MASK_TAG, 0);

            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst), ptr);
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b111));

            assert_eq!(
                atom.fetch_and(MASK_PTR | 0b0010, Ordering::SeqCst),
                ptr.map_addr(|a| a | 0b111)
            );
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));

            assert_eq!(atom.fetch_xor(0b1011, Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));

            assert_eq!(atom.fetch_and(MASK_PTR, Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));
            assert_eq!(atom.load(Ordering::SeqCst), ptr);
        }
        #[test]
        fn bit_set() {
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>().map_addr(|a| a | 1));
            // bit_set returns the previous state of the bit (already set here).
            test_swap_ordering(|order| assert!(a.bit_set(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                let atom = <$atomic_type>::new(pointer);
                // Tag the bottom bit of the pointer.
                assert!(!atom.bit_set(0, order));
                // Extract and untag.
                let tagged = atom.load(Ordering::Relaxed);
                assert_eq!(tagged.addr() & 1, 1);
                assert_eq!(tagged.map_addr(|p| p & !1), pointer);
            }
        }
        #[test]
        fn bit_clear() {
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>());
            // bit_clear returns the previous state of the bit (clear here).
            test_swap_ordering(|order| assert!(!a.bit_clear(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                // A tagged pointer
                let atom = <$atomic_type>::new(pointer.map_addr(|a| a | 1));
                assert!(atom.bit_set(0, order));
                // Untag
                assert!(atom.bit_clear(0, order));
            }
        }
        #[test]
        fn bit_toggle() {
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>());
            test_swap_ordering(|order| a.bit_toggle(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                let atom = <$atomic_type>::new(pointer);
                // Toggle a tag bit on the pointer.
                atom.bit_toggle(0, order);
                assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
            }
        }
    };
}
1753
// Entry point: generates a `test_atomic_<int>` module containing only the
// load/store-level tests for the given integer's atomic type.
// `paste` builds the module and type names (e.g. u32 -> AtomicU32).
macro_rules! test_atomic_int_load_store {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Entry point: generates a `test_atomic_bool` module containing only the
// load/store-level tests for AtomicBool.
macro_rules! test_atomic_bool_load_store {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_bool {
            use super::*;
            __test_atomic_bool_load_store!(AtomicBool);
        }
    };
}
// Entry point: generates a `test_atomic_ptr` module containing only the
// load/store-level tests for AtomicPtr<u8>.
macro_rules! test_atomic_ptr_load_store {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
        }
    };
}
1798
// Entry point: generates a `test_atomic_<int>` module with load/store plus
// the single-thread-only RMW tests for the given integer's atomic type.
macro_rules! test_atomic_int_single_thread {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type, single_thread);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type, single_thread);
            }
        }
    };
}
// Entry point: generates a `test_atomic_bool` module with load/store plus
// the single-thread-only RMW tests for AtomicBool.
macro_rules! test_atomic_bool_single_thread {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_bool {
            use super::*;
            __test_atomic_bool_load_store!(AtomicBool, single_thread);
            __test_atomic_bool!(AtomicBool, single_thread);
        }
    };
}
// Entry point: generates a `test_atomic_ptr` module with load/store plus
// the single-thread-only RMW tests for AtomicPtr<u8>.
macro_rules! test_atomic_ptr_single_thread {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>, single_thread);
            __test_atomic_ptr!(AtomicPtr<u8>, single_thread);
        }
    };
}
1846
// Entry point: generates a `test_atomic_<int>` module with the full
// (load/store + RMW) test suite for the given integer's atomic type.
macro_rules! test_atomic_int {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Generates a `test_atomic_bool` module running the load/store and RMW test
// suites for `AtomicBool` (multi-threaded variant).
macro_rules! test_atomic_bool {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_bool {
            use super::*;
            __test_atomic_bool_load_store!(AtomicBool);
            __test_atomic_bool!(AtomicBool);
        }
    };
}
// Generates a `test_atomic_ptr` module running the load/store and RMW test
// suites for `AtomicPtr<u8>` (multi-threaded variant).
macro_rules! test_atomic_ptr {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        #[allow(unstable_name_collisions)] // for sptr crate
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
            __test_atomic_ptr!(AtomicPtr<u8>);
        }
    };
}
1895
// Generates a `test_atomic_<int>` module running both the internal test suites
// and the additional `*_pub` suites that exercise the public-only API surface.
macro_rules! test_atomic_int_pub {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int_load_store_pub!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int_pub!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Generates a `test_atomic_<int>` module running only the load/store suites
// (internal + public API), without the RMW suites.
macro_rules! test_atomic_int_load_store_pub {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int_load_store_pub!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Generates a `test_atomic_<float>` module running the float load/store, RMW,
// and public-API test suites. Only available with the `float` feature.
#[cfg(feature = "float")]
macro_rules! test_atomic_float_pub {
    ($float_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $float_type>] {
                use super::*;
                __test_atomic_float_load_store!([<Atomic $float_type:camel>], $float_type);
                __test_atomic_float!([<Atomic $float_type:camel>], $float_type);
                __test_atomic_float_pub!([<Atomic $float_type:camel>], $float_type);
            }
        }
    };
}
// Generates a `test_atomic_bool` module running the internal and public-API
// test suites for `AtomicBool`.
macro_rules! test_atomic_bool_pub {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_bool {
            use super::*;
            __test_atomic_bool_load_store!(AtomicBool);
            __test_atomic_bool!(AtomicBool);
            __test_atomic_bool_pub!(AtomicBool);
        }
    };
}
// Generates a `test_atomic_ptr` module running the internal and public-API
// test suites for `AtomicPtr<u8>`.
macro_rules! test_atomic_ptr_pub {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        #[allow(unstable_name_collisions)] // for sptr crate
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
            __test_atomic_ptr!(AtomicPtr<u8>);
            __test_atomic_ptr_pub!(AtomicPtr<u8>);
        }
    };
}
1984
// Asserts that `$a` and `$b` have performed equivalent operations.
// NaNs are treated as equal regardless of payload/sign, and infinities are
// compared by sign only, because bit-exact float results are not guaranteed.
#[cfg(feature = "float")]
macro_rules! assert_float_op_eq {
    ($a:expr, $b:expr $(,)?) => {{
        // See also:
        // - https://github.com/rust-lang/unsafe-code-guidelines/issues/237.
        // - https://github.com/rust-lang/portable-simd/issues/39.
        let a = $a;
        let b = $b;
        if a.is_nan() && b.is_nan() // don't check sign of NaN: https://github.com/rust-lang/rust/issues/55131
            || a.is_infinite()
                && b.is_infinite()
                && a.is_sign_positive() == b.is_sign_positive()
                && a.is_sign_negative() == b.is_sign_negative()
        {
            // ok
        } else {
            assert_eq!(a, b);
        }
    }};
}
2006
/// Extension trait exposing a float type's machine epsilon through a value,
/// so macro-generated tests can obtain `EPSILON` from an expression without
/// naming the concrete float type.
pub(crate) trait FloatExt: Copy {
    /// Returns `Self::EPSILON`.
    fn epsilon(&self) -> Self;
}
// Generate the identical impl for each primitive float type.
macro_rules! impl_float_ext {
    ($($float_ty:ident),* $(,)?) => {$(
        impl FloatExt for $float_ty {
            fn epsilon(&self) -> Self {
                $float_ty::EPSILON
            }
        }
    )*};
}
impl_float_ext!(f32, f64);
2020
/// Runs `f`, asserts that it panics, and returns the panic message.
///
/// The default panic output is silenced while `f` runs: `RUST_BACKTRACE=0`
/// suppresses the backtrace and an empty hook suppresses the
/// "thread panicked at ..." message. Both are restored afterwards.
///
/// Panics (via `unwrap_err`) if `f` completes without panicking.
#[cfg_attr(not(portable_atomic_no_track_caller), track_caller)]
pub(crate) fn assert_panic<T: std::fmt::Debug>(f: impl FnOnce() -> T) -> std::string::String {
    let saved_backtrace = std::env::var_os("RUST_BACKTRACE");
    let saved_hook = std::panic::take_hook();
    std::env::set_var("RUST_BACKTRACE", "0"); // Suppress backtrace
    std::panic::set_hook(std::boxed::Box::new(|_| {})); // Suppress panic msg
    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(f));
    // Restore the previous hook and environment before inspecting the result.
    std::panic::set_hook(saved_hook);
    if let Some(v) = saved_backtrace {
        std::env::set_var("RUST_BACKTRACE", v);
    } else {
        std::env::remove_var("RUST_BACKTRACE");
    }
    // A formatted panic carries a `String` payload; a literal panic carries
    // a `&'static str`. Any other payload type is unexpected here.
    let payload = result.unwrap_err();
    if let Some(s) = payload.downcast_ref::<std::string::String>() {
        s.clone()
    } else {
        payload.downcast_ref::<&'static str>().copied().unwrap().into()
    }
}
// Returns a randomly chosen ordering that is valid for load operations.
pub(crate) fn rand_load_ordering() -> Ordering {
    test_helper::LOAD_ORDERINGS[fastrand::usize(0..test_helper::LOAD_ORDERINGS.len())]
}
// Runs `f` with every ordering that is valid for loads, then (unless
// should-panic tests are skipped) asserts that the invalid orderings
// `Release` and `AcqRel` panic with the standard library's messages.
pub(crate) fn test_load_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
    for &order in &test_helper::LOAD_ORDERINGS {
        f(order);
    }

    if !skip_should_panic_test() {
        assert_eq!(
            assert_panic(|| f(Ordering::Release)),
            "there is no such thing as a release load"
        );
        assert_eq!(
            assert_panic(|| f(Ordering::AcqRel)),
            "there is no such thing as an acquire-release load"
        );
    }
}
// Returns a randomly chosen ordering that is valid for store operations.
pub(crate) fn rand_store_ordering() -> Ordering {
    test_helper::STORE_ORDERINGS[fastrand::usize(0..test_helper::STORE_ORDERINGS.len())]
}
// Runs `f` with every ordering that is valid for stores, then (unless
// should-panic tests are skipped) asserts that the invalid orderings
// `Acquire` and `AcqRel` panic with the standard library's messages.
pub(crate) fn test_store_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
    for &order in &test_helper::STORE_ORDERINGS {
        f(order);
    }

    if !skip_should_panic_test() {
        assert_eq!(
            assert_panic(|| f(Ordering::Acquire)),
            "there is no such thing as an acquire store"
        );
        assert_eq!(
            assert_panic(|| f(Ordering::AcqRel)),
            "there is no such thing as an acquire-release store"
        );
    }
}
// Returns a randomly chosen (success, failure) ordering pair that is valid
// for compare_exchange operations.
pub(crate) fn rand_compare_exchange_ordering() -> (Ordering, Ordering) {
    test_helper::COMPARE_EXCHANGE_ORDERINGS
        [fastrand::usize(0..test_helper::COMPARE_EXCHANGE_ORDERINGS.len())]
}
// Runs `f` with every valid (success, failure) ordering pair, then (unless
// should-panic tests are skipped) asserts that invalid failure orderings
// (`AcqRel`, `Release`) panic. Two messages are accepted per case because
// implementations may report either a bad failure ordering or a bad load
// ordering for the same misuse.
pub(crate) fn test_compare_exchange_ordering<T: std::fmt::Debug>(
    f: impl Fn(Ordering, Ordering) -> T,
) {
    for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
        f(success, failure);
    }

    if !skip_should_panic_test() {
        for &order in &test_helper::SWAP_ORDERINGS {
            let msg = assert_panic(|| f(order, Ordering::AcqRel));
            assert!(
                msg == "there is no such thing as an acquire-release failure ordering"
                    || msg == "there is no such thing as an acquire-release load",
                "{}",
                msg
            );
            let msg = assert_panic(|| f(order, Ordering::Release));
            assert!(
                msg == "there is no such thing as a release failure ordering"
                    || msg == "there is no such thing as a release load",
                "{}",
                msg
            );
        }
    }
}
// Returns a randomly chosen ordering that is valid for swap/RMW operations.
pub(crate) fn rand_swap_ordering() -> Ordering {
    test_helper::SWAP_ORDERINGS[fastrand::usize(0..test_helper::SWAP_ORDERINGS.len())]
}
// Runs `f` with every swap/RMW ordering. All orderings are valid for RMW
// operations, so there are no should-panic cases here.
pub(crate) fn test_swap_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
    for &order in &test_helper::SWAP_ORDERINGS {
        f(order);
    }
}
// for stress test generated by __test_atomic_* macros
//
// Returns `(iterations, threads)`, scaled down for slow environments:
// Miri is very slow, and debug builds are slower than release builds.
pub(crate) fn stress_test_config() -> (usize, usize) {
    let iterations = if cfg!(miri) {
        50
    } else if cfg!(debug_assertions) {
        5_000
    } else {
        25_000
    };
    // Randomize the thread count in release builds to vary contention between runs.
    let threads = if cfg!(debug_assertions) { 2 } else { fastrand::usize(2..=8) };
    std::eprintln!("threads={}", threads);
    (iterations, threads)
}
// Whether tests that intentionally trigger panics should be skipped
// in the current environment.
fn skip_should_panic_test() -> bool {
    // Miri's panic handling is slow
    // MSAN false positive: https://gist.github.com/taiki-e/dd6269a8ffec46284fdc764a4849f884
    // Note: `&&` binds tighter than `||`, so the last condition is
    // `(fat LTO) && (memory sanitizer enabled)`.
    is_panic_abort()
        || cfg!(miri)
        || option_env!("CARGO_PROFILE_RELEASE_LTO").map_or(false, |v| v == "fat")
            && build_context::SANITIZE.contains("memory")
}
2135
// For -C panic=abort -Z panic_abort_tests: https://github.com/rust-lang/rust/issues/67650
//
// Returns true when the build uses abort-on-panic, in which case unwinding
// panics cannot be caught and should-panic tests must be skipped.
fn is_panic_abort() -> bool {
    build_context::PANIC.contains("abort")
}
2140
ed00b5ec
FG
/// Wrapper that forces its contents to be aligned to a 16-byte boundary.
#[repr(C, align(16))]
pub(crate) struct Align16<T>(pub(crate) T);
2143
781aab86
FG
// Test the cases that should not fail if the memory ordering is implemented correctly.
// This is still not exhaustive and only tests a few cases.
// This currently only supports 32-bit or more integers.
macro_rules! __stress_test_acquire_release {
    // Generates a #[test] that must never fail: the (load, store) ordering pair
    // is strong enough (release/acquire or stronger) to guarantee `a <= b` below.
    (should_pass, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            #[test]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                __stress_test_acquire_release!([<Atomic $int_type:camel>],
                    $int_type, $write, $load_order, $store_order);
            }
        }
    };
    // Generates a #[test] that is *allowed* to fail on weak memory models; the
    // caller-supplied `can_panic` wrapper (catch_unwind_on_weak_memory_arch)
    // tolerates a panic whose message contains "a=".
    (can_panic, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is slow in some environments.
            // So, ignore on non-Miri environments by default. See also catch_unwind_on_weak_memory_arch.
            #[test]
            #[cfg_attr(not(miri), ignore)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                can_panic("a=", || __stress_test_acquire_release!([<Atomic $int_type:camel>],
                    $int_type, $write, $load_order, $store_order));
            }
        }
    };
    // The actual stress loop: a writer thread publishes `b` (Relaxed) before
    // `a` ($store_order); the reader checks `a <= b`, which is guaranteed to
    // hold only with a release store / acquire load pairing (or stronger).
    ($atomic_type:ident, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {{
        use super::*;
        use crossbeam_utils::thread;
        use std::{
            convert::TryFrom,
            sync::atomic::{AtomicUsize, Ordering},
        };
        let mut n: usize = if cfg!(miri) { 10 } else { 50_000 };
        // This test is relatively fast because it spawns only one thread, but
        // the iterations are limited to a maximum value of integers.
        if $int_type::try_from(n).is_err() {
            n = $int_type::MAX as usize;
        }
        let a = &$atomic_type::new(0);
        let b = &AtomicUsize::new(0);
        thread::scope(|s| {
            s.spawn(|_| {
                for i in 0..n {
                    b.store(i, Ordering::Relaxed);
                    a.$write(i as _, Ordering::$store_order);
                }
            });
            loop {
                let a = a.load(Ordering::$load_order);
                let b = b.load(Ordering::Relaxed);
                assert!(a as usize <= b, "a={},b={}", a, b);
                if a as usize == n - 1 {
                    break;
                }
            }
        })
        .unwrap();
    }};
}
// Stress-tests sequential consistency via the classic store-buffering litmus
// test: two threads each store to one atomic and load the other; with SeqCst
// on all four accesses at most one thread can observe the other's pre-store
// value (c <= 1). Weaker orderings may legally observe c == 2.
macro_rules! __stress_test_seqcst {
    // Generates a #[test] that must never fail (all-SeqCst combination).
    (should_pass, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is very slow in some environments because
            // it creates two threads for each iteration.
            // So, ignore on QEMU by default.
            #[test]
            #[cfg_attr(qemu, ignore)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                __stress_test_seqcst!([<Atomic $int_type:camel>],
                    $write, $load_order, $store_order);
            }
        }
    };
    // Generates a #[test] that is *allowed* to fail on non-SeqCst-providing
    // combinations; `can_panic` (catch_unwind_on_non_seqcst_arch) tolerates a
    // panic whose message contains "c=2".
    (can_panic, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is very slow in some environments because
            // it creates two threads for each iteration.
            // So, ignore on non-Miri environments by default. See also catch_unwind_on_non_seqcst_arch.
            #[test]
            #[cfg_attr(not(miri), ignore)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                can_panic("c=2", || __stress_test_seqcst!([<Atomic $int_type:camel>],
                    $write, $load_order, $store_order));
            }
        }
    };
    // The actual litmus loop. Iteration count is scaled down for slow
    // environments (Miri, valgrind, sanitizers, slow CI runners).
    ($atomic_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {{
        use super::*;
        use crossbeam_utils::thread;
        use std::sync::atomic::{AtomicUsize, Ordering};
        let n: usize = if cfg!(miri) {
            8
        } else if cfg!(valgrind)
            || build_context::SANITIZE.contains("address")
            || build_context::SANITIZE.contains("memory")
        {
            50
        } else if option_env!("GITHUB_ACTIONS").is_some() && cfg!(not(target_os = "linux")) {
            // GitHub Actions' macOS and Windows runners are slow.
            5_000
        } else {
            50_000
        };
        let a = &$atomic_type::new(0);
        let b = &$atomic_type::new(0);
        let c = &AtomicUsize::new(0);
        let ready = &AtomicUsize::new(0);
        thread::scope(|s| {
            for n in 0..n {
                a.store(0, Ordering::Relaxed);
                b.store(0, Ordering::Relaxed);
                c.store(0, Ordering::Relaxed);
                let h_a = s.spawn(|_| {
                    // Spin until both threads are released, so they race.
                    while ready.load(Ordering::Relaxed) == 0 {}
                    a.$write(1, Ordering::$store_order);
                    if b.load(Ordering::$load_order) == 0 {
                        c.fetch_add(1, Ordering::Relaxed);
                    }
                });
                let h_b = s.spawn(|_| {
                    while ready.load(Ordering::Relaxed) == 0 {}
                    b.$write(1, Ordering::$store_order);
                    if a.load(Ordering::$load_order) == 0 {
                        c.fetch_add(1, Ordering::Relaxed);
                    }
                });
                ready.store(1, Ordering::Relaxed);
                h_a.join().unwrap();
                h_b.join().unwrap();
                let c = c.load(Ordering::Relaxed);
                // c == 2 would mean both threads missed the other's store,
                // which sequential consistency forbids.
                assert!(c == 0 || c == 1, "c={},n={}", c, n);
            }
        })
        .unwrap();
    }};
}
// Catches unwinding panic on architectures with weak memory models.
//
// On TSO-like architectures the wrapped test must not panic, so `f` runs
// unguarded. Elsewhere a panic is tolerated, but its message must contain
// `pat` (i.e. it must be the expected stress-test assertion, not some other
// failure).
#[allow(dead_code, clippy::used_underscore_binding)]
pub(crate) fn catch_unwind_on_weak_memory_arch(pat: &str, f: impl Fn()) {
    // With x86 TSO, RISC-V TSO (optional, not default), SPARC TSO (optional, default),
    // and IBM-370 memory models should never be a panic here.
    // Miri emulates weak memory models regardless of target architectures.
    if cfg!(all(
        any(
            target_arch = "x86",
            target_arch = "x86_64",
            target_arch = "s390x",
            target_arch = "sparc",
            target_arch = "sparc64",
        ),
        not(any(miri)),
    )) {
        f();
    } else if !is_panic_abort() {
        // This could be is_err on architectures with weak memory models.
        // However, this does not necessarily mean that it will always be panic,
        // and implementing it with stronger orderings is also okay.
        match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
            Ok(()) => {
                // panic!();
            }
            Err(msg) => {
                // Panic payload is a String (formatted panic) or &'static str (literal).
                let msg = msg
                    .downcast_ref::<std::string::String>()
                    .cloned()
                    .unwrap_or_else(|| msg.downcast_ref::<&'static str>().copied().unwrap().into());
                assert!(msg.contains(pat), "{}", msg);
            }
        }
    }
}
2318// Catches unwinding panic on architectures with non-sequentially consistent memory models.
2319#[allow(dead_code, clippy::used_underscore_binding)]
2320pub(crate) fn catch_unwind_on_non_seqcst_arch(pat: &str, f: impl Fn()) {
2321 if !is_panic_abort() {
2322 // This could be Err on architectures with non-sequentially consistent memory models.
2323 // However, this does not necessarily mean that it will always be panic,
2324 // and implementing it with stronger orderings is also okay.
2325 match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
2326 Ok(()) => {
2327 // panic!();
2328 }
2329 Err(msg) => {
2330 let msg = msg
2331 .downcast_ref::<std::string::String>()
2332 .cloned()
2333 .unwrap_or_else(|| msg.downcast_ref::<&'static str>().copied().unwrap().into());
2334 assert!(msg.contains(pat), "{}", msg);
2335 }
2336 }
2337 }
2338}
// Generates acquire/release and seqcst stress-test modules for `store` on the
// given integer type. `should_pass` combinations must never fail; `can_panic`
// combinations are allowed to fail on sufficiently weak memory models.
macro_rules! stress_test_load_store {
    ($int_type:ident) => {
        // debug mode is slow.
        #[cfg(any(not(debug_assertions), miri))]
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_acquire_release_load_store_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_weak_memory_arch as can_panic;
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, Release);
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, store, Acquire, Relaxed);
                __stress_test_acquire_release!(should_pass, $int_type, store, Acquire, Release);
                __stress_test_acquire_release!(should_pass, $int_type, store, Acquire, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, store, SeqCst, Relaxed);
                __stress_test_acquire_release!(should_pass, $int_type, store, SeqCst, Release);
                __stress_test_acquire_release!(should_pass, $int_type, store, SeqCst, SeqCst);
            }
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_seqcst_load_store_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_non_seqcst_arch as can_panic;
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, Release);
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, Release);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, store, SeqCst, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, SeqCst, Release);
                __stress_test_seqcst!(should_pass, $int_type, store, SeqCst, SeqCst);
            }
        }
    };
}
// Generates the full stress-test suite for the given integer type: the
// load/store tests plus acquire/release and seqcst tests for `swap` (swap
// additionally accepts Acquire/AcqRel store-side orderings, hence the larger
// ordering matrix). Only SeqCst/SeqCst is required to pass the seqcst test.
macro_rules! stress_test {
    ($int_type:ident) => {
        stress_test_load_store!($int_type);
        // debug mode is slow.
        #[cfg(any(not(debug_assertions), miri))]
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_acquire_release_load_swap_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_weak_memory_arch as can_panic;
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Acquire);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Release);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, AcqRel);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Acquire, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Acquire, Acquire);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, Release);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, AcqRel);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, swap, SeqCst, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, SeqCst, Acquire);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, Release);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, AcqRel);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, SeqCst);
            }
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_seqcst_load_swap_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_non_seqcst_arch as can_panic;
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, AcqRel);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, AcqRel);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, AcqRel);
                __stress_test_seqcst!(should_pass, $int_type, swap, SeqCst, SeqCst);
            }
        }
    };
}