/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	mb_write(void);
#endif

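/*
 * mb_write() is intended as a store (write) barrier: stores issued before the
 * call become visible to other processors before stores issued after it.  As
 * an illustrative sketch only (the variables below are hypothetical, not part
 * of jemalloc), a writer publishing data through a flag would do:
 *
 *	data = ...;	(Prior store.)
 *	mb_write();	(Keep the data store ahead of the flag store.)
 *	ready = true;	(Publishing store.)
 */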
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier.  Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{

# if 0
	/* This is a true memory barrier. */
	asm volatile ("pusha;"
	    "xor %%eax,%%eax;"
	    "cpuid;"
	    "popa;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
# else
	/*
	 * This is hopefully enough to keep the compiler from reordering
	 * instructions around this one.
	 */
	asm volatile ("nop;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
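/*
 * On x86-64, sfence orders all prior stores before any subsequent stores; the
 * "memory" clobber additionally prevents the compiler from moving memory
 * accesses across the barrier.
 */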
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("sfence"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__powerpc__)
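/*
 * eieio ("enforce in-order execution of I/O") orders prior stores to ordinary
 * cacheable memory before later ones, which suffices here and is cheaper than
 * a full sync.
 */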
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("eieio"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__sparc__) && defined(__arch64__)
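/*
 * membar #StoreStore is the SPARC V9 store/store barrier: all stores before it
 * complete before any store after it becomes visible.
 */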
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("membar #StoreStore"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__tile__)
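/* The GCC builtin __sync_synchronize() emits a full memory barrier. */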
JEMALLOC_INLINE void
mb_write(void)
{

	__sync_synchronize();
}
#else
/*
 * This is much slower than a simple memory barrier, but the release semantics
 * of mutex unlock make this work.
 */
JEMALLOC_INLINE void
mb_write(void)
{
	malloc_mutex_t mtx;

	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
	malloc_mutex_lock(TSDN_NULL, &mtx);
	malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/