/* src/jemalloc/include/jemalloc/internal/atomic.h (rustc.git vendored jemalloc) */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
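
/*
 * Note: reads are expressed as the corresponding add with an identity
 * argument (0, or NULL for pointers), so each backend below only has to
 * supply add/sub/cas/write.  A hypothetical caller (not part of this
 * header) would observe equivalent results from either form:
 *
 *   uint64_t counter = 42;
 *   uint64_t a = atomic_read_uint64(&counter);    -- yields 42
 *   uint64_t b = atomic_add_uint64(&counter, 0);  -- also yields 42
 */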

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

/*
 * All arithmetic functions return the arithmetic result of the atomic
 * operation.  Some atomic operation APIs return the value prior to mutation,
 * in which case the following functions must redundantly compute the result
 * so that it can be returned.  These functions are normally inlined, so the
 * extra operations can be optimized away if the return values aren't used by
 * the callers.
 *
 *   <t> atomic_read_<t>(<t> *p) { return (*p); }
 *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
 *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
 *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
 *   {
 *     if (*p != c)
 *       return (true);
 *     *p = s;
 *     return (false);
 *   }
 *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
 */

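/*
 * Usage sketch (hypothetical caller, not part of this header), assuming a
 * counter shared between threads.  Note the inverted CAS convention above:
 * false means the swap succeeded.
 *
 *   static uint64_t nrequests;  -- shared statistics counter
 *
 *   atomic_add_uint64(&nrequests, 1);
 *   uint64_t snapshot = atomic_read_uint64(&nrequests);
 *
 *   -- CAS loop that atomically doubles the counter:
 *   uint64_t old;
 *   do {
 *       old = atomic_read_uint64(&nrequests);
 *   } while (atomic_cas_uint64(&nrequests, old, old * 2));
 */
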
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t = x;

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t;

    x = (uint64_t)(-(int64_t)x);
    t = x;
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgq %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory" /* Clobbers. */
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    asm volatile (
        "xchgq %1, %0;" /* Lock is implied by xchgq. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    /*
     * atomic_fetchadd_64() doesn't exist, but we only ever use this
     * function on LP64 systems, so atomic_fetchadd_long() will do.
     */
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    atomic_store_rel_long(p, x);
}
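
/*
 * An aside on the LP64 assumption above (illustration, not part of the
 * original header; the mapping to FreeBSD is our reading of the config
 * macro): JEMALLOC_ATOMIC9 gates FreeBSD's atomic(9) interface, which
 * offers atomic_fetchadd_long() but no 64-bit-specific fetch-add.  On LP64
 * systems sizeof(unsigned long) == 8 == sizeof(uint64_t), so the assert
 * documents (and, in debug builds, enforces) the only case for which these
 * functions are compiled.
 */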
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    uint64_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint64(p);
    } while (atomic_cas_uint64(p, o, x));
}
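
/*
 * An aside on the loop above (illustration, not part of the original
 * header): because atomic_cas_uint64() returns false on success, the loop
 * re-reads and retries only while the CAS fails, i.e. while another thread
 * changed *p between the read and the swap.  This is the standard way to
 * synthesize an atomic exchange from compare-and-swap when the primitive
 * is missing.
 */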
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint64_t o;

    o = InterlockedCompareExchange64(p, s, c);
    /* o is the original value; the swap happened iff o == c. */
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
       defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    __sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif

/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t = x;

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t;

    x = (uint32_t)(-(int32_t)x);
    t = x;
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgl %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory" /* Clobbers. */
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    asm volatile (
        "xchgl %1, %0;" /* Lock is implied by xchgl. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!atomic_cmpset_32(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    uint32_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint32(p);
    } while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint32_t o;

    o = InterlockedCompareExchange(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
      defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    __sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
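
/*
 * Usage sketch for the pointer operations (hypothetical caller, not part of
 * this header): publishing a lazily-created object exactly once.
 * create_object() and destroy_object() are hypothetical helpers.
 *
 *   static void *cached;  -- starts out NULL
 *
 *   void *obj = create_object();
 *   if (atomic_cas_p(&cached, NULL, obj)) {
 *       -- CAS failed (returns true): another thread won the race,
 *       -- so discard our object and use the published one.
 *       destroy_object(obj);
 *       obj = atomic_read_p(&cached);
 *   }
 */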

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{

#if (LG_SIZEOF_INT == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/