#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")
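
/*
 * A minimal sketch of what barrier() buys (names are hypothetical):
 * without it, the compiler may cache 'flag' in a register and spin
 * forever; the "memory" clobber forces it to reload from memory.
 *
 *     extern int flag;
 *
 *     static inline void wait_for_flag(void)
 *     {
 *         while (!flag) {
 *             barrier();   // compiler must re-read 'flag' each pass
 *         }
 *     }
 */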

#if defined(__i386__)

#include "compiler.h" /* QEMU_GNUC_PREREQ */

/*
 * Because of the strongly ordered x86 storage model, smp_wmb() and
 * smp_rmb() are no-ops on x86 (well, a compiler barrier only), at
 * least as long as qemu doesn't access write-combining memory or use
 * non-temporal loads/stores from C code.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
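
/*
 * A minimal sketch of the intended pairing (all names here are
 * hypothetical): the producer publishes data before setting a flag,
 * and the consumer checks the flag before reading the data.
 *
 *     extern int data, ready;
 *
 *     static inline void publish(int v)
 *     {
 *         data = v;
 *         smp_wmb();   // data store ordered before the flag store
 *         ready = 1;
 *     }
 *
 *     static inline int consume(void)
 *     {
 *         while (!ready) {
 *             barrier();   // reload 'ready' on each iteration
 *         }
 *         smp_rmb();   // flag load ordered before the data load
 *         return data;
 *     }
 */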
/*
 * We use the GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as 4.3.
 */
#if QEMU_GNUC_PREREQ(4, 4)
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#endif
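
/*
 * smp_mb() matters when a store must become visible before a later
 * load: x86 allows store-load reordering through the store buffer.
 * A hypothetical Dekker-style sketch:
 *
 *     extern int want[2];
 *
 *     static inline int try_enter(int self)
 *     {
 *         want[self] = 1;
 *         smp_mb();    // store visible before loading the peer's flag
 *         return !want[1 - self];
 *     }
 */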

#elif defined(__x86_64__)

#define smp_wmb() barrier()
#define smp_rmb() barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use eieio for smp_wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")
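
/*
 * Illustration of the assumption above (device and field names are
 * hypothetical): eieio does not order a cacheable RAM store against
 * a caching-inhibited MMIO store, so ringing a device doorbell after
 * filling a descriptor needs the full "sync" (smp_mb() here):
 *
 *     desc->len = len;                      // cacheable RAM store
 *     smp_mb();                             // "sync" orders RAM vs. MMIO
 *     *(volatile uint32_t *)doorbell = 1;   // caching-inhibited MMIO
 */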

#if defined(__powerpc64__)
#define smp_rmb() asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb() asm volatile("sync" ::: "memory")
#endif

#define smp_mb() asm volatile("sync" ::: "memory")

#else

/*
 * For (host) platforms for which we don't have explicit barrier
 * definitions, we use the gcc __sync_synchronize() primitive to
 * generate a full barrier. This should be safe on all platforms,
 * though it may be overkill for smp_wmb() and smp_rmb().
 */
#define smp_wmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()

#endif

#endif