#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")

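/*
 * Usage sketch, for illustration: barrier() constrains only the
 * compiler, not the CPU.  A typical use is forcing a reload inside a
 * polling loop; `flag` and wait_for_flag() are placeholder names, not
 * part of this header.
 *
 *   extern int flag;
 *
 *   void wait_for_flag(void)
 *   {
 *       while (!flag) {
 *           barrier();   // memory clobber: flag is reloaded each pass
 *       }
 *   }
 */
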
#if defined(__i386__)

#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */

/*
 * Because of the strongly ordered x86 storage model, wmb() and rmb()
 * are no-ops on x86 (compiler barriers only), at least as long as QEMU
 * doesn't access write-combining memory or use non-temporal load/stores
 * from C code.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()

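/*
 * Pairing sketch, for illustration: smp_wmb() on the producer side
 * pairs with smp_rmb() on the consumer side.  `data`, `ready`,
 * producer() and consumer() are placeholder names, not part of this
 * header.
 *
 *   static int data;
 *   static int ready;
 *
 *   void producer(void)
 *   {
 *       data = 42;
 *       smp_wmb();       // order the data store before the flag store
 *       ready = 1;
 *   }
 *
 *   void consumer(void)
 *   {
 *       if (ready) {
 *           smp_rmb();   // order the flag load before the data load
 *           // data is guaranteed to read as 42 here
 *       }
 *   }
 */
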
/*
 * We use the GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as GCC 4.3.
 */
#if QEMU_GNUC_PREREQ(4, 4)
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
#endif

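/*
 * Why smp_mb() needs a real fence here, for illustration: x86 may
 * reorder a store after a later load, so Dekker-style mutual exclusion
 * breaks without a full barrier between the two.  flag0, flag1,
 * thread0() and thread1() are placeholder names, not part of this
 * header.
 *
 *   static int flag0, flag1;
 *
 *   void thread0(void)
 *   {
 *       flag0 = 1;
 *       smp_mb();        // order the flag0 store before the flag1 load
 *       if (!flag1) {
 *           // critical section
 *       }
 *   }
 *
 *   void thread1(void)   // symmetric: without the smp_mb() calls,
 *   {                    // both threads could observe the other flag as 0
 *       flag1 = 1;
 *       smp_mb();
 *       if (!flag0) {
 *           // critical section
 *       }
 *   }
 */
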
#elif defined(__x86_64__)

#define smp_wmb() barrier()
#define smp_rmb() barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
/* lwsync orders everything except store-load, which is all rmb() needs */
#define smp_rmb() asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb() asm volatile("sync" ::: "memory")
#endif

#define smp_mb() asm volatile("sync" ::: "memory")

#else

/*
 * For host platforms without explicit barrier definitions, we use the
 * gcc __sync_synchronize() primitive to generate a full barrier.  This
 * should be safe on all platforms, though it may be overkill for wmb()
 * and rmb().
 */
#define smp_wmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()

#endif
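
/*
 * Combined sketch, for illustration: a minimal seqlock reader/writer
 * built from the macros above, pairing smp_wmb() on the write side
 * with smp_rmb() on the read side.  `seq`, `value`, write_side() and
 * read_side() are placeholder names, and the accesses are assumed to
 * be single-copy atomic.
 *
 *   static unsigned seq;
 *   static int value;
 *
 *   void write_side(int v)
 *   {
 *       seq++;           // odd: a writer is active
 *       smp_wmb();       // order the seq store before the data store
 *       value = v;
 *       smp_wmb();       // order the data store before the seq store
 *       seq++;           // even: the writer is done
 *   }
 *
 *   int read_side(void)
 *   {
 *       unsigned s;
 *       int v;
 *       do {
 *           do { s = seq; } while (s & 1);  // wait for an even count
 *           smp_rmb();   // order the seq load before the data load
 *           v = value;
 *           smp_rmb();   // order the data load before the seq re-check
 *       } while (seq != s);                 // retry if a writer ran
 *       return v;
 *   }
 */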

#endif