[qemu.git] / qemu-barrier.h
#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")

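/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): barrier() constrains only the compiler, not the CPU.  Without
 * it, GCC may reorder or combine the two stores below; with it, they are
 * emitted in program order:
 *
 *     data = 42;
 *     barrier();     <- the data store may not be moved past this point
 *     ready = 1;
 *
 * On a weakly ordered CPU the hardware can still reorder the stores as
 * seen by another processor; that is what the smp_*() macros below add.
 */
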
#if defined(__i386__)

/*
 * Because of the strongly ordered x86 storage model, wmb() and rmb() are
 * no-ops on x86 (they reduce to compiler barriers), at least as long as
 * qemu doesn't access write-combining memory or use non-temporal
 * load/stores from C code.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
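
/*
 * Usage sketch (an editorial addition): because stores stay ordered with
 * stores and loads with loads on x86, a producer/consumer handoff needs
 * only these compiler-level barriers:
 *
 *     producer:                    consumer:
 *         data = 42;                   while (!flag)
 *         smp_wmb();                       ;
 *         flag = 1;                    smp_rmb();
 *                                      assert(data == 42);
 */
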
/*
 * We use the GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as 4.3.
 */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#endif

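/*
 * Sketch (an editorial addition): the one reordering x86 does permit is a
 * store followed by a load of a different location, so Dekker/Peterson
 * style mutual exclusion needs the full smp_mb():
 *
 *     CPU0:                        CPU1:
 *         flag0 = 1;                   flag1 = 1;
 *         smp_mb();                    smp_mb();
 *         r0 = flag1;                  r1 = flag0;
 *
 * Without the fences, r0 == 0 && r1 == 0 is a permitted outcome.
 */
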
#elif defined(__x86_64__)

#define smp_wmb() barrier()
#define smp_rmb() barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use an eieio for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
#define smp_rmb() asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb() asm volatile("sync" ::: "memory")
#endif

#define smp_mb() asm volatile("sync" ::: "memory")
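
/*
 * Summary sketch (an editorial addition) of the powerpc mapping above:
 *
 *     smp_wmb()  ->  eieio    orders cacheable stores against each other
 *     smp_rmb()  ->  lwsync   on 64 bit: orders load/load and load/store
 *                    sync     on 32 bit: full barrier, heavier but safe
 *     smp_mb()   ->  sync     full barrier, orders all access pairs
 */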

#else

/*
 * For (host) platforms without explicit barrier definitions above,
 * we use the gcc __sync_synchronize() primitive to generate a full
 * barrier.  This should be safe on all platforms, though it may be
 * overkill for wmb() and rmb().
 */
#define smp_wmb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
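
/*
 * Note (an editorial addition): __sync_synchronize() expands to the
 * target's full memory fence, e.g. something like dmb on ARMv7 or sync
 * on mips, so this fallback trades some performance on hosts that have
 * cheaper one-way barriers for portability.
 */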

#endif

#endif /* __QEMU_BARRIER_H */