Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 19 Jan 2016 00:44:24 +0000 (16:44 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 19 Jan 2016 00:44:24 +0000 (16:44 -0800)
Pull virtio barrier rework+fixes from Michael Tsirkin:
 "This adds a new kind of barrier, and reworks virtio and xen to use it.

  Plus some fixes here and there"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
  checkpatch: add virt barriers
  checkpatch: check for __smp outside barrier.h
  checkpatch.pl: add missing memory barriers
  virtio: make find_vqs() checkpatch.pl-friendly
  virtio_balloon: fix race between migration and ballooning
  virtio_balloon: fix race by fill and leak
  s390: more efficient smp barriers
  s390: use generic memory barriers
  xen/events: use virt_xxx barriers
  xen/io: use virt_xxx barriers
  xenbus: use virt_xxx barriers
  virtio_ring: use virt_store_mb
  sh: move xchg_cmpxchg to a header by itself
  sh: support 1 and 2 byte xchg
  virtio_ring: update weak barriers to use virt_xxx
  Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
  asm-generic: implement virt_xxx memory barriers
  x86: define __smp_xxx
  xtensa: define __smp_xxx
  tile: define __smp_xxx
  ...

44 files changed:
Documentation/memory-barriers.txt
arch/arm/include/asm/barrier.h
arch/arm64/include/asm/barrier.h
arch/blackfin/include/asm/barrier.h
arch/ia64/include/asm/barrier.h
arch/ia64/kernel/iosapic.c
arch/metag/include/asm/barrier.h
arch/mips/include/asm/barrier.h
arch/powerpc/include/asm/barrier.h
arch/s390/include/asm/barrier.h
arch/sh/include/asm/barrier.h
arch/sh/include/asm/cmpxchg-grb.h
arch/sh/include/asm/cmpxchg-irq.h
arch/sh/include/asm/cmpxchg-llsc.h
arch/sh/include/asm/cmpxchg-xchg.h [new file with mode: 0644]
arch/sh/include/asm/cmpxchg.h
arch/sparc/include/asm/barrier_32.h
arch/sparc/include/asm/barrier_64.h
arch/sparc/include/asm/processor.h
arch/tile/include/asm/barrier.h
arch/x86/include/asm/barrier.h
arch/x86/um/asm/barrier.h
arch/xtensa/include/asm/barrier.h
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/misc/mic/card/mic_virtio.c
drivers/remoteproc/remoteproc_virtio.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/s390/virtio/kvm_virtio.c
drivers/s390/virtio/virtio_ccw.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_input.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_ring.c
drivers/xen/events/events_fifo.c
drivers/xen/xenbus/xenbus_comms.c
include/asm-generic/barrier.h
include/linux/virtio_config.h
include/linux/virtio_ring.h
include/xen/interface/io/ring.h
mm/balloon_compaction.c
scripts/checkpatch.pl

diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a61be39c7b516a1e3b081afd1fea02a3f1068187..904ee42d078e51d8b43fe6548611219c2c0444c0 100644
@@ -1655,17 +1655,18 @@ macro is a good place to start looking.
 SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
 systems because it is assumed that a CPU will appear to be self-consistent,
 and will order overlapping accesses correctly with respect to itself.
+However, see the subsection on "Virtual Machine Guests" below.
 
 [!] Note that SMP memory barriers _must_ be used to control the ordering of
 references to shared memory on SMP systems, though the use of locking instead
 is sufficient.
 
 Mandatory barriers should not be used to control SMP effects, since mandatory
-barriers unnecessarily impose overhead on UP systems. They may, however, be
-used to control MMIO effects on accesses through relaxed memory I/O windows.
-These are required even on non-SMP systems as they affect the order in which
-memory operations appear to a device by prohibiting both the compiler and the
-CPU from reordering them.
+barriers impose unnecessary overhead on both SMP and UP systems. They may,
+however, be used to control MMIO effects on accesses through relaxed memory I/O
+windows.  These barriers are required even on non-SMP systems as they affect
+the order in which memory operations appear to a device by prohibiting both the
+compiler and the CPU from reordering them.
 
 
 There are some more advanced barrier functions:
@@ -2948,6 +2949,23 @@ The Alpha defines the Linux kernel's memory barrier model.
 
 See the subsection on "Cache Coherency" above.
 
+VIRTUAL MACHINE GUESTS
+----------------------
+
+Guests running within virtual machines might be affected by SMP effects even if
+the guest itself is compiled without SMP support.  This is an artifact of
+interfacing with an SMP host while running a UP kernel.  Using mandatory
+barriers for this use case would be possible but is often suboptimal.
+
+To handle this case optimally, low-level virt_mb() etc macros are available.
+These have the same effect as smp_mb() etc when SMP is enabled, but generate
+identical code for SMP and non-SMP systems. For example, virtual machine guests
+should use virt_mb() rather than smp_mb() when synchronizing against a
+(possibly SMP) host.
+
+These are equivalent to their smp_mb() etc. counterparts in all other
+respects; in particular, they do not control MMIO effects: to control
+MMIO effects, use mandatory barriers.
 
 ============
 EXAMPLE USES
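
As an illustration of the new subsection (a sketch, not part of the patch; the
ring layout and names below are invented for the example): a UP guest driver
sharing a ring with a possibly-SMP host wants virt_wmb(), which remains a real
CPU barrier even when CONFIG_SMP=n, where smp_wmb() would collapse to a plain
compiler barrier.

	#include <linux/compiler.h>	/* WRITE_ONCE() */
	#include <asm/barrier.h>	/* virt_wmb() */

	struct guest_ring {			/* hypothetical layout */
		unsigned long long desc[256];
		unsigned short avail_idx;
	};

	static void guest_publish(struct guest_ring *r, unsigned short head,
				  unsigned long long d)
	{
		r->desc[head & 255] = d;
		virt_wmb();	/* host must see the descriptor before it
				 * sees the updated index */
		WRITE_ONCE(r->avail_idx, head + 1);
	}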
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 3ff5642d9788490167a87629fe6774297adbd67d..112cc1a5d47f2ccac216d568e7ba30e2c0eaa8e0 100644
@@ -60,38 +60,11 @@ extern void arm_heavy_mb(void);
 #define dma_wmb()      barrier()
 #endif
 
-#ifndef CONFIG_SMP
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#else
-#define smp_mb()       dmb(ish)
-#define smp_rmb()      smp_mb()
-#define smp_wmb()      dmb(ishst)
-#endif
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
-#define smp_load_acquire(p)                                            \
-({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ___p1;                                                          \
-})
-
-#define read_barrier_depends()         do { } while(0)
-#define smp_read_barrier_depends()     do { } while(0)
-
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define __smp_mb()     dmb(ish)
+#define __smp_rmb()    __smp_mb()
+#define __smp_wmb()    dmb(ishst)
 
-#define smp_mb__before_atomic()        smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
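
The arm hunk above shows the pattern repeated by the per-architecture patches
in this series: each arch now defines only its strongest __smp_* primitives
and pulls in asm-generic/barrier.h, which derives everything else. A
simplified sketch of that derivation (the real version is in the
asm-generic/barrier.h hunk further down):

	#ifdef CONFIG_SMP
	#define smp_mb()	__smp_mb()	/* real barrier, e.g. dmb(ish) */
	#else
	#define smp_mb()	barrier()	/* compiler barrier only */
	#endif
	#define virt_mb()	__smp_mb()	/* always the real barrier */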
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb48f894db3fdb06db5441339babc2a1fdeb..dae5c49618db38021b418379bcb9fe52529ec862 100644
 #define dma_rmb()      dmb(oshld)
 #define dma_wmb()      dmb(oshst)
 
-#define smp_mb()       dmb(ish)
-#define smp_rmb()      dmb(ishld)
-#define smp_wmb()      dmb(ishst)
+#define __smp_mb()     dmb(ish)
+#define __smp_rmb()    dmb(ishld)
+#define __smp_wmb()    dmb(ishst)
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                              \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        switch (sizeof(*p)) {                                           \
@@ -62,7 +62,7 @@ do {                                                                  \
        }                                                               \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        union { typeof(*p) __val; char __c[1]; } __u;                   \
        compiletime_assert_atomic_type(*p);                             \
@@ -91,14 +91,7 @@ do {                                                                 \
        __u.__val;                                                      \
 })
 
-#define read_barrier_depends()         do { } while(0)
-#define smp_read_barrier_depends()     do { } while(0)
-
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-#define nop()          asm volatile("nop");
-
-#define smp_mb__before_atomic()        smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index dfb66fe88b34542e6e9286593e6284fd73a8ece2..7cca51cae5ffb9c74edae91ae4296165f8083e7e 100644
@@ -78,8 +78,8 @@
 
 #endif /* !CONFIG_SMP */
 
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b817c958e25eea0298afe121fc225f1e9e3..588f1614cafc2a86c2dcb816c4c1f71d633551f2 100644
 #define dma_rmb()      mb()
 #define dma_wmb()      mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()      mb()
-#else
-# define smp_mb()      barrier()
-#endif
+# define __smp_mb()    mb()
 
-#define smp_rmb()      smp_mb()
-#define smp_wmb()      smp_mb()
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
-
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                              \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
@@ -77,12 +67,12 @@ do {                                                                        \
        ___p1;                                                          \
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_IA64_BARRIER_H */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d2fae054d988cdc9326ad249434bb8ec50b0b45e..90fde5b8669d96411bc9741801cf24efa2a9aa07 100644
@@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 }
 
 static void
-nop (struct irq_data *data)
+iosapic_nop (struct irq_data *data)
 {
        /* do nothing... */
 }
@@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data)
 #define iosapic_shutdown_level_irq     mask_irq
 #define iosapic_enable_level_irq       unmask_irq
 #define iosapic_disable_level_irq      mask_irq
-#define iosapic_ack_level_irq          nop
+#define iosapic_ack_level_irq          iosapic_nop
 
 static struct irq_chip irq_type_iosapic_level = {
        .name =                 "IO-SAPIC-level",
@@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data)
 }
 
 #define iosapic_enable_edge_irq                unmask_irq
-#define iosapic_disable_edge_irq       nop
+#define iosapic_disable_edge_irq       iosapic_nop
 
 static struct irq_chip irq_type_iosapic_edge = {
        .name =                 "IO-SAPIC-edge",
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 172b7e5efc53f99145f18eccc90d3beb72d0a435..5418517aa5eb35fdec4ab71656ff9daa5fc379f9 100644
@@ -44,16 +44,6 @@ static inline void wr_fence(void)
 #define rmb()          barrier()
 #define wmb()          mb()
 
-#define dma_rmb()      rmb()
-#define dma_wmb()      wmb()
-
-#ifndef CONFIG_SMP
-#define fence()                do { } while (0)
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#else
-
 #ifdef CONFIG_METAG_SMP_WRITE_REORDERING
 /*
  * Write to the atomic memory unlock system event register (command 0). This is
@@ -63,45 +53,32 @@ static inline void wr_fence(void)
  * incoherence). It is therefore ineffective if used after and on the same
  * thread as a write.
  */
-static inline void fence(void)
+static inline void metag_fence(void)
 {
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
        barrier();
        *flushptr = 0;
        barrier();
 }
-#define smp_mb()        fence()
-#define smp_rmb()       fence()
-#define smp_wmb()       barrier()
+#define __smp_mb()     metag_fence()
+#define __smp_rmb()    metag_fence()
+#define __smp_wmb()    barrier()
 #else
-#define fence()                do { } while (0)
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#endif
+#define metag_fence()  do { } while (0)
+#define __smp_mb()     barrier()
+#define __smp_rmb()    barrier()
+#define __smp_wmb()    barrier()
 #endif
 
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
+#ifdef CONFIG_SMP
+#define fence()                metag_fence()
+#else
+#define fence()                do { } while (0)
+#endif
 
-#define smp_load_acquire(p)                                            \
-({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ___p1;                                                          \
-})
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
 
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 752e0b86c171005882be2f25df45aecd246cd2a7..d296633d890e56c892059871499e989c85a93203 100644
@@ -10,9 +10,6 @@
 
 #include <asm/addrspace.h>
 
-#define read_barrier_depends()         do { } while(0)
-#define smp_read_barrier_depends()     do { } while(0)
-
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()                               \
        __asm__ __volatile__(                   \
 
 #define wmb()          fast_wmb()
 #define rmb()          fast_rmb()
-#define dma_wmb()      fast_wmb()
-#define dma_rmb()      fast_rmb()
 
-#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#if defined(CONFIG_WEAK_ORDERING)
 # ifdef CONFIG_CPU_CAVIUM_OCTEON
-#  define smp_mb()     __sync()
-#  define smp_rmb()    barrier()
-#  define smp_wmb()    __syncw()
+#  define __smp_mb()   __sync()
+#  define __smp_rmb()  barrier()
+#  define __smp_wmb()  __syncw()
 # else
-#  define smp_mb()     __asm__ __volatile__("sync" : : :"memory")
-#  define smp_rmb()    __asm__ __volatile__("sync" : : :"memory")
-#  define smp_wmb()    __asm__ __volatile__("sync" : : :"memory")
+#  define __smp_mb()   __asm__ __volatile__("sync" : : :"memory")
+#  define __smp_rmb()  __asm__ __volatile__("sync" : : :"memory")
+#  define __smp_wmb()  __asm__ __volatile__("sync" : : :"memory")
 # endif
 #else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
+#define __smp_mb()     barrier()
+#define __smp_rmb()    barrier()
+#define __smp_wmb()    barrier()
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
 #define __WEAK_LLSC_MB         "               \n"
 #endif
 
-#define smp_store_mb(var, value) \
-       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_llsc_mb()  __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
 /* Cause previous writes to become visible on all CPUs as soon as possible */
 #define nudge_writes() __asm__ __volatile__(".set push\n\t"            \
                                            ".set arch=octeon\n\t"      \
                                            ".set pop" : : : "memory")
 #else
 #define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
 #define nudge_writes() mb()
 #endif
 
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
-#define smp_load_acquire(p)                                            \
-({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ___p1;                                                          \
-})
-
-#define smp_mb__before_atomic()        smp_mb__before_llsc()
-#define smp_mb__after_atomic() smp_llsc_mb()
+#define __smp_mb__before_atomic()      __smp_mb__before_llsc()
+#define __smp_mb__after_atomic()       smp_llsc_mb()
+
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index a7af5fb7b91476148e9ee2ce44d4d93ed3ad70fd..c0deafc212b8b9856bcb923a1bf07c439452d1e9 100644
@@ -34,8 +34,6 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
 #else
 #define dma_rmb()      __lwsync()
 #define dma_wmb()      __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
-#ifdef CONFIG_SMP
-#define smp_lwsync()   __lwsync()
-
-#define smp_mb()       mb()
-#define smp_rmb()      __lwsync()
-#define smp_wmb()      __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#else
-#define smp_lwsync()   barrier()
+#define __smp_lwsync() __lwsync()
 
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#endif /* CONFIG_SMP */
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
+#define __smp_mb()     mb()
+#define __smp_rmb()    __lwsync()
+#define __smp_wmb()    __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
 /*
  * This is a barrier which prevents following instructions from being
 #define data_barrier(x)        \
        asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                              \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_lwsync();                                                   \
+       __smp_lwsync();                                                 \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
-       smp_lwsync();                                                   \
+       __smp_lwsync();                                                 \
        ___p1;                                                          \
 })
 
-#define smp_mb__before_atomic()     smp_mb()
-#define smp_mb__after_atomic()      smp_mb()
 #define smp_mb__before_spinlock()   smp_mb()
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 7ffd0b19135c8d46770f1f37ca6d4da19e352de0..5c8db3ce61c8e34009a5f236a378e81783c57e98 100644
 #define wmb()                          barrier()
 #define dma_rmb()                      mb()
 #define dma_wmb()                      mb()
-#define smp_mb()                       mb()
-#define smp_rmb()                      rmb()
-#define smp_wmb()                      wmb()
+#define __smp_mb()                     mb()
+#define __smp_rmb()                    rmb()
+#define __smp_wmb()                    wmb()
 
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
-
-#define smp_mb__before_atomic()                smp_mb()
-#define smp_mb__after_atomic()         smp_mb()
-
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
@@ -53,4 +45,9 @@ do {                                                                  \
        ___p1;                                                          \
 })
 
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index bf91037db4e01f66dcf6a2e84536279dcbf028c1..f887c6465a821b7b96a2a300268f077afd70ae5a 100644
@@ -32,7 +32,8 @@
 #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) __smp_store_mb(var, value)
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index f848dec9e483de8ed72cf3240c733d52f0da38e5..2ed557b31bd994fa12077c5ba4238613a782ddcc 100644
@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       unsigned long retval;
+
+       __asm__ __volatile__ (
+               "   .align  2             \n\t"
+               "   mova    1f,   r0      \n\t" /* r0 = end point */
+               "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+               "   mov    #-6,   r15     \n\t" /* LOGIN */
+               "   mov.w  @%1,   %0      \n\t" /* load  old value */
+               "   extu.w  %0,   %0      \n\t" /* extend as unsigned */
+               "   mov.w   %2,   @%1     \n\t" /* store new value */
+               "1: mov     r1,   r15     \n\t" /* LOGOUT */
+               : "=&r" (retval),
+                 "+r"  (m),
+                 "+r"  (val)           /* inhibit r15 overloading */
+               :
+               : "memory" , "r0", "r1");
+
+       return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
        unsigned long retval;
diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h
index bd11f630414ad173640b937c0576a8a12399a42a..f88877257171a9add838e97a2c1bd8a0339b227b 100644
@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       unsigned long flags, retval;
+
+       local_irq_save(flags);
+       retval = *m;
+       *m = val;
+       local_irq_restore(flags);
+       return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
        unsigned long flags, retval;
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 47136661a203a96ad2c5d676fff377bf789cbafb..fcfd32271bff7f6d890922eb7a7b2dea7da34186 100644
@@ -22,29 +22,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-       unsigned long retval;
-       unsigned long tmp;
-
-       __asm__ __volatile__ (
-               "1:                                     \n\t"
-               "movli.l        @%2, %0 ! xchg_u8       \n\t"
-               "mov            %0, %1                  \n\t"
-               "mov            %3, %0                  \n\t"
-               "movco.l        %0, @%2                 \n\t"
-               "bf             1b                      \n\t"
-               "synco                                  \n\t"
-               : "=&z"(tmp), "=&r" (retval)
-               : "r" (m), "r" (val & 0xff)
-               : "t", "memory"
-       );
-
-       return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
        unsigned long retval;
        unsigned long tmp;
@@ -68,4 +47,6 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
        return retval;
 }
 
+#include <asm/cmpxchg-xchg.h>
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
new file mode 100644
index 0000000..7219719
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+       int off = (unsigned long)ptr % sizeof(u32);
+       volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+       int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+       int bitoff = off * BITS_PER_BYTE;
+#endif
+       u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+       u32 oldv, newv;
+       u32 ret;
+
+       do {
+               oldv = READ_ONCE(*p);
+               ret = (oldv & bitmask) >> bitoff;
+               newv = (oldv & ~bitmask) | (x << bitoff);
+       } while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+       return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+       return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
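
The new header above synthesizes 1- and 2-byte exchanges from a 4-byte
compare-and-swap by masking one lane of the containing word. A standalone
sketch of the same technique, substituting GCC's __atomic builtins for the
kernel's __cmpxchg_u32 and assuming a little-endian lane layout:

	#include <stdint.h>

	/* Exchange the byte at byte offset 'off' inside the u32 at *p. */
	static uint8_t xchg_u8_via_cas(uint32_t *p, int off, uint8_t x)
	{
		int bitoff = off * 8;		/* little-endian lane */
		uint32_t mask = 0xffu << bitoff;
		uint32_t oldv, newv;

		do {
			oldv = __atomic_load_n(p, __ATOMIC_RELAXED);
			newv = (oldv & ~mask) | ((uint32_t)x << bitoff);
		} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

		return (oldv & mask) >> bitoff;
	}

For example, with *p == 0xaabbccdd, off == 1 and x == 0x11, the loop installs
0xaabb11dd and returns the displaced lane value 0xcc.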
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 85c97b188d71647683df8bebc6fd7fbbb3d55442..5225916c10574f20b91db0ae9019ce2c04d36e11 100644
@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
        case 4:                                         \
                __xchg__res = xchg_u32(__xchg_ptr, x);  \
                break;                                  \
+       case 2:                                         \
+               __xchg__res = xchg_u16(__xchg_ptr, x);  \
+               break;                                  \
        case 1:                                         \
                __xchg__res = xchg_u8(__xchg_ptr, x);   \
                break;                                  \
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index ae69eda288f41774e0e799b182672bddb0b9e493..8059130a6ceebdbc544685f72ab53fbf2a44dd3b 100644
@@ -1,7 +1,6 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-#include <asm/processor.h> /* for nop() */
 #include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 14a928601657da55bce01e1ef37043f3dd35d379..c9f6ee64f41d89bb093ace84603c9f0ed14f1db1 100644
@@ -37,33 +37,14 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 #define rmb()  __asm__ __volatile__("":::"memory")
 #define wmb()  __asm__ __volatile__("":::"memory")
 
-#define dma_rmb()      rmb()
-#define dma_wmb()      wmb()
-
-#define smp_store_mb(__var, __value) \
-       do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#else
-#define smp_mb()       __asm__ __volatile__("":::"memory")
-#define smp_rmb()      __asm__ __volatile__("":::"memory")
-#define smp_wmb()      __asm__ __volatile__("":::"memory")
-#endif
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
-
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                              \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
@@ -71,7 +52,9 @@ do {                                                                  \
        ___p1;                                                          \
 })
 
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 2fe99e66e760f0072dd6e1d66676016070baced6..9da9646bf6c6b326ee6d6f47b1d60bd4dd970453 100644
@@ -5,7 +5,4 @@
 #else
 #include <asm/processor_32.h>
 #endif
-
-#define nop()          __asm__ __volatile__ ("nop")
-
 #endif
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index 96a42ae79f4dd6c280970d3fc88d589c762b0910..d55222806c2f7d7ee96f135da21ec787d1123610 100644
@@ -79,11 +79,12 @@ mb_incoherent(void)
  * But after the word is updated, the routine issues an "mf" before returning,
  * and since it's a function call, we don't even need a compiler barrier.
  */
-#define smp_mb__before_atomic()        smp_mb()
-#define smp_mb__after_atomic() do { } while (0)
+#define __smp_mb__before_atomic()      __smp_mb()
+#define __smp_mb__after_atomic()       do { } while (0)
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
 #else /* 64 bit */
-#define smp_mb__before_atomic()        smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#define __smp_mb__before_atomic()      __smp_mb()
+#define __smp_mb__after_atomic()       __smp_mb()
 #endif
 
 #include <asm-generic/barrier.h>
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d2532527f6027a4dada67d1d18156a4cebd2..a584e1c50918406a0398cf4a013432e47abf143e 100644
 #endif
 #define dma_wmb()      barrier()
 
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      dma_rmb()
-#define smp_wmb()      barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
+#define __smp_mb()     mb()
+#define __smp_rmb()    dma_rmb()
+#define __smp_wmb()    barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       __smp_mb();                                                     \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       __smp_mb();                                                     \
        ___p1;                                                          \
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
@@ -88,7 +78,9 @@ do {                                                                  \
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_X86_BARRIER_H */
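
One x86 detail worth noting in the hunk above: __smp_store_mb() is built on
xchg() rather than a store followed by a fence. An xchg with a memory operand
is implicitly LOCKed and is therefore already a full barrier, so
smp_store_mb() and the new virt_store_mb() become a single instruction instead
of a store plus mfence. Sketch of the two equivalent publishing sequences:

	u16 flag;

	WRITE_ONCE(flag, 1);
	__smp_mb();		/* store, then a separate full fence */

	(void)xchg(&flag, 1);	/* one implicitly LOCKed instruction,
				 * a full barrier by itself */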
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f14d90f266fb6afc253fb674a7d34d53c4..174781a404ff3cfbeb1f9dccaef820f7aa2e9a51 100644
 #endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb()      barrier()
 
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
+#include <asm-generic/barrier.h>
 
 #endif
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 5b88774c75abaee187cc5b1f3915858b3452d3b4..956596e4d437470c9d451d3fdaff6577c49477f5 100644
@@ -13,8 +13,8 @@
 #define rmb() barrier()
 #define wmb() mb()
 
-#define smp_mb__before_atomic()                barrier()
-#define smp_mb__after_atomic()         barrier()
+#define __smp_mb__before_atomic()              barrier()
+#define __smp_mb__after_atomic()               barrier()
 
 #include <asm-generic/barrier.h>
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 06496a128162218a263bac88e396602d3f11b7fc..4150873d432efb1fb05cb2129e93267436620d0f 100644
@@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
        static vq_callback_t *callbacks[] = {
                virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
        };
-       static const char *names[] = { "control", "cursor" };
+       static const char * const names[] = { "control", "cursor" };
 
        struct virtio_gpu_device *vgdev;
        /* this will expand later */
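
This and the matching find_vqs() changes below all serve one purpose: with the
parameter typed const char * const names[], callers may pass name tables that
live entirely in read-only storage. A sketch of why the extra const is needed:

	static const char * const names[] = { "control", "cursor" };

	static int take(const char * const n[]) { return !!n[0]; }

	/* take(names) compiles cleanly.  A 'const char *n[]' parameter
	 * would draw a discarded-qualifiers warning instead, since such
	 * a callee could legally do n[0] = "oops" on what is read-only
	 * storage here. */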
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index e486a0c26267abb2f40375f93df3fabffdc91259..f6ed57d3125c753e6abe42838f2c6bbf9a407c82 100644
@@ -311,7 +311,7 @@ unmap:
 static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
-                       const char *names[])
+                       const char * const names[])
 {
        struct mic_vdev *mvdev = to_micvdev(vdev);
        struct mic_device_ctrl __iomem *dc = mvdev->dc;
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e1a10232a9437b5884e16965d4b5bced5b069ffe..e44872fb9e5e23ff2aaf284a675bab940d67b172 100644
@@ -147,7 +147,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
-                      const char *names[])
+                      const char * const names[])
 {
        struct rproc *rproc = vdev_to_rproc(vdev);
        int i, ret;
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 73354ee278771ac0be6fc9fcaa40b96f9964bc9b..1fcd27c1f1833ea5c949cba01bb7209072b1b016 100644
@@ -945,7 +945,7 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
 static int rpmsg_probe(struct virtio_device *vdev)
 {
        vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
-       const char *names[] = { "input", "output" };
+       static const char * const names[] = { "input", "output" };
        struct virtqueue *vqs[2];
        struct virtproc_info *vrp;
        void *bufs_va;
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b19b3f1d50f1f226cd0155accd683..1d060fd293a3b8e8a4d4095b2ad84241913272d5 100644
@@ -255,7 +255,7 @@ static void kvm_del_vqs(struct virtio_device *vdev)
 static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
-                       const char *names[])
+                       const char * const names[])
 {
        struct kvm_device *kdev = to_kvmdev(vdev);
        int i;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 1b831598df7c6544057f23ef69da5ea54bbafcdd..bf2d1300a9578817d5b14df9cd8664838d0189e5 100644
@@ -635,7 +635,7 @@ out:
 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
-                              const char *names[])
+                              const char * const names[])
 {
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        unsigned long *indicatorp = NULL;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7efc32945810e8fdcafa76a6328517f35d65ea3c..0c3691f46575b9e82039b27d7e452075699f9514 100644
@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
         */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
-       mutex_unlock(&vb->balloon_lock);
        release_pages_balloon(vb);
+       mutex_unlock(&vb->balloon_lock);
        return num_freed_pages;
 }
 
@@ -388,7 +388,7 @@ static int init_vqs(struct virtio_balloon *vb)
 {
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
-       const char *names[] = { "inflate", "deflate", "stats" };
+       static const char * const names[] = { "inflate", "deflate", "stats" };
        int err, nvqs;
 
        /*
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index c96944b59856c10c7d28c189b5ff86dbe4b3c932..350a2a5a49dbedbbfcb45e9fd3ad9be142828294 100644
@@ -170,7 +170,7 @@ static int virtinput_init_vqs(struct virtio_input *vi)
        struct virtqueue *vqs[2];
        vq_callback_t *cbs[] = { virtinput_recv_events,
                                 virtinput_recv_status };
-       static const char *names[] = { "events", "status" };
+       static const char * const names[] = { "events", "status" };
        int err;
 
        err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index f499d9da72373d04d115caa4b7b4c9e6585ee965..745c6ee1bb3eab1259522d2833b350aa33ceb1f7 100644
@@ -482,7 +482,7 @@ error_available:
 static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
-                      const char *names[])
+                      const char * const names[])
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
        unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 78f804af6c2020a9b927286e3496f6a2d8541953..36205c27c4d0f19ab61a4aa4175de3dd8785de66 100644
@@ -296,7 +296,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
-                             const char *names[],
+                             const char * const names[],
                              bool use_msix,
                              bool per_vq_vectors)
 {
@@ -376,7 +376,7 @@ error_find:
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[],
                vq_callback_t *callbacks[],
-               const char *names[])
+               const char * const names[])
 {
        int err;
 
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b976d968e793683a22a6d6c7f213763863ef0bd0..2cc252270b2d1f906044e21b25d021bf82d6089c 100644
@@ -139,7 +139,7 @@ void vp_del_vqs(struct virtio_device *vdev);
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
-                      const char *names[]);
+                      const char * const names[]);
 const char *vp_bus_name(struct virtio_device *vdev);
 
 /* Setup the affinity for a virtqueue:
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 8e5cf194cc0bd003888c1235b8d9bfc1cbc99e9c..c0c11fad4611a72a3712c3ef7471193cb5d8fa5d 100644
@@ -418,7 +418,7 @@ err_new_queue:
 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
-                             const char *names[])
+                             const char * const names[])
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c458b20a449c353c5ea0df4632933087e66..e12e385f7ac3507e60a3c77b0ff1b25a3c168b26 100644
@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
-       if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-               vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-               virtio_mb(vq->weak_barriers);
-       }
+       if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+               virtio_store_mb(vq->weak_barriers,
+                               &vring_used_event(&vq->vring),
+                               cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
 
 #ifdef DEBUG
        vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
        }
        /* TODO: tune this threshold */
        bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-       vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-       virtio_mb(vq->weak_barriers);
+
+       virtio_store_mb(vq->weak_barriers,
+                       &vring_used_event(&vq->vring),
+                       cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
        if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
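
The two virtio_ring.c hunks above fuse an event-index store with the barrier
that must follow it into virtio_store_mb(). A full barrier is needed, not just
a write barrier, because the next access is a load of vring.used->idx, and
only a full barrier orders a store against a subsequent load; fusing the pair
also lets x86 emit a single xchg instead of a store plus mfence. The
open-coded form being replaced, for comparison:

	vring_used_event(&vq->vring) = cpu_to_virtio16(vdev, event_idx);
	virtio_mb(vq->weak_barriers);	/* order the store before the
					 * following load of used->idx */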
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 96a1b8da53715e7501d09369ee9ef375f421e936..eff2b88003d930c2085b33820e970d55c1008b9c 100644
@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpu.h>
 
+#include <asm/barrier.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -296,7 +297,7 @@ static void consume_one_event(unsigned cpu,
         * control block.
         */
        if (head == 0) {
-               rmb(); /* Ensure word is up-to-date before reading head. */
+               virt_rmb(); /* Ensure word is up-to-date before reading head. */
                head = control_block->head[priority];
        }
 
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index fdb0f339d0a75b06bfe37ec46bc08d90e14a9759..ecdecce80a6c9600fd5b78773cdcca070eb444bb 100644
@@ -123,14 +123,14 @@ int xb_write(const void *data, unsigned len)
                        avail = len;
 
                /* Must write data /after/ reading the consumer index. */
-               mb();
+               virt_mb();
 
                memcpy(dst, data, avail);
                data += avail;
                len -= avail;
 
                /* Other side must not see new producer until data is there. */
-               wmb();
+               virt_wmb();
                intf->req_prod += avail;
 
                /* Implies mb(): other side will see the updated producer. */
@@ -180,14 +180,14 @@ int xb_read(void *data, unsigned len)
                        avail = len;
 
                /* Must read data /after/ reading the producer index. */
-               rmb();
+               virt_rmb();
 
                memcpy(data, src, avail);
                data += avail;
                len -= avail;
 
                /* Other side must not see free space until we've copied out */
-               mb();
+               virt_mb();
                intf->rsp_cons += avail;
 
                pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 0f45f93ef6922ba1417966ba99680a96f81a19f9..1cceca146905013dbe2c346020ad73fe3df68ce5 100644
 #define read_barrier_depends()         do { } while (0)
 #endif
 
+#ifndef __smp_mb
+#define __smp_mb()     mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()    rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()    wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()   read_barrier_depends()
+#endif
+
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()       mb()
+#define smp_mb()       __smp_mb()
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()      rmb()
+#define smp_rmb()      __smp_rmb()
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()      wmb()
+#define smp_wmb()      __smp_wmb()
 #endif
 
 #ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()     read_barrier_depends()
+#define smp_read_barrier_depends()     __smp_read_barrier_depends()
 #endif
 
 #else  /* !CONFIG_SMP */
 
 #endif /* CONFIG_SMP */
 
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()      __smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()       __smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)                                      \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       __smp_mb();                                                     \
+       WRITE_ONCE(*p, v);                                              \
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)                                          \
+({                                                                     \
+       typeof(*p) ___p1 = READ_ONCE(*p);                               \
+       compiletime_assert_atomic_type(*p);                             \
+       __smp_mb();                                                     \
+       ___p1;                                                          \
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()        __smp_mb__before_atomic()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else  /* !CONFIG_SMP */
+
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()        smp_mb()
+#define smp_mb__before_atomic()        barrier()
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() smp_mb()
+#define smp_mb__after_atomic() barrier()
 #endif
 
+#ifndef smp_store_release
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
+#endif
 
+#ifndef smp_load_acquire
 #define smp_load_acquire(p)                                            \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       barrier();                                                      \
        ___p1;                                                          \
 })
+#endif
+
+#endif
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic()        __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
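
Putting the pieces of this header together, the resulting mapping can be
summarized as follows (barrier() being the pure compiler barrier):

	/*
	 *                CONFIG_SMP=y     CONFIG_SMP=n
	 *  mb()          mandatory CPU barrier in both cases
	 *  smp_mb()      __smp_mb()       barrier()
	 *  virt_mb()     __smp_mb()       __smp_mb()
	 */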
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e5ce8ab0b8b01a4e4d8bc17d3efd67f9c0b2d517..6e6cb0c9d7cbd5de972cef0077caad6bd7f1cafa 100644
@@ -70,7 +70,7 @@ struct virtio_config_ops {
        int (*find_vqs)(struct virtio_device *, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
-                       const char *names[]);
+                       const char * const names[]);
        void (*del_vqs)(struct virtio_device *);
        u64 (*get_features)(struct virtio_device *vdev);
        int (*finalize_features)(struct virtio_device *vdev);
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 8e50888a6d595af06221ee43dfc2d0e4aa91c18e..a156e2b6ccfe33ef4901b7a3059f25add3c0f70c 100644
@@ -12,7 +12,7 @@
  * anyone care?
  *
  * For virtio_pci on SMP, we don't need to order with respect to MMIO
- * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * accesses through relaxed memory I/O windows, so virt_mb() et al are
  * sufficient.
  *
  * For using virtio to talk to real devices (eg. other heterogeneous
 
 static inline void virtio_mb(bool weak_barriers)
 {
-#ifdef CONFIG_SMP
        if (weak_barriers)
-               smp_mb();
+               virt_mb();
        else
-#endif
                mb();
 }
 
 static inline void virtio_rmb(bool weak_barriers)
 {
        if (weak_barriers)
-               dma_rmb();
+               virt_rmb();
        else
                rmb();
 }
@@ -42,11 +40,22 @@ static inline void virtio_rmb(bool weak_barriers)
 static inline void virtio_wmb(bool weak_barriers)
 {
        if (weak_barriers)
-               dma_wmb();
+               virt_wmb();
        else
                wmb();
 }
 
+static inline void virtio_store_mb(bool weak_barriers,
+                                  __virtio16 *p, __virtio16 v)
+{
+       if (weak_barriers) {
+               virt_store_mb(*p, v);
+       } else {
+               WRITE_ONCE(*p, v);
+               mb();
+       }
+}
+
 struct virtio_device;
 struct virtqueue;
 
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7dc685b4057d33ad227d861a2a3115a5ca55cd3e..21f4fbd55e48edf03061b92d687a23cbe34a8989 100644
@@ -208,12 +208,12 @@ struct __name##_back_ring {                                               \
 
 
 #define RING_PUSH_REQUESTS(_r) do {                                    \
-    wmb(); /* back sees requests /before/ updated producer index */    \
+    virt_wmb(); /* back sees requests /before/ updated producer index */       \
     (_r)->sring->req_prod = (_r)->req_prod_pvt;                                \
 } while (0)
 
 #define RING_PUSH_RESPONSES(_r) do {                                   \
-    wmb(); /* front sees responses /before/ updated producer index */  \
+    virt_wmb(); /* front sees responses /before/ updated producer index */     \
     (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                                \
 } while (0)
 
@@ -250,9 +250,9 @@ struct __name##_back_ring {                                         \
 #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {          \
     RING_IDX __old = (_r)->sring->req_prod;                            \
     RING_IDX __new = (_r)->req_prod_pvt;                               \
-    wmb(); /* back sees requests /before/ updated producer index */    \
+    virt_wmb(); /* back sees requests /before/ updated producer index */       \
     (_r)->sring->req_prod = __new;                                     \
-    mb(); /* back sees new requests /before/ we check req_event */     \
+    virt_mb(); /* back sees new requests /before/ we check req_event */        \
     (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <          \
                 (RING_IDX)(__new - __old));                            \
 } while (0)
@@ -260,9 +260,9 @@ struct __name##_back_ring {                                         \
 #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {         \
     RING_IDX __old = (_r)->sring->rsp_prod;                            \
     RING_IDX __new = (_r)->rsp_prod_pvt;                               \
-    wmb(); /* front sees responses /before/ updated producer index */  \
+    virt_wmb(); /* front sees responses /before/ updated producer index */     \
     (_r)->sring->rsp_prod = __new;                                     \
-    mb(); /* front sees new responses /before/ we check rsp_event */   \
+    virt_mb(); /* front sees new responses /before/ we check rsp_event */      \
     (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <          \
                 (RING_IDX)(__new - __old));                            \
 } while (0)
@@ -271,7 +271,7 @@ struct __name##_back_ring {                                         \
     (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                  \
     if (_work_to_do) break;                                            \
     (_r)->sring->req_event = (_r)->req_cons + 1;                       \
-    mb();                                                              \
+    virt_mb();                                                         \
     (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                  \
 } while (0)
 
@@ -279,7 +279,7 @@ struct __name##_back_ring {                                         \
     (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                 \
     if (_work_to_do) break;                                            \
     (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                       \
-    mb();                                                              \
+    virt_mb();                                                         \
     (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                 \
 } while (0)
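
The _AND_CHECK_NOTIFY macros above decide whether to notify the peer with
(RING_IDX)(__new - event) < (RING_IDX)(__new - __old), which on free-running
unsigned indices tests, wrap-safely, whether the peer's event index fell
inside the half-open interval (__old, __new]. A standalone sketch:

	#include <stdint.h>

	/* Notify iff 'event' lies in (old, new] on wrapping 32-bit indices. */
	static int should_notify(uint32_t old_idx, uint32_t new_idx,
				 uint32_t event)
	{
		return (uint32_t)(new_idx - event) <
		       (uint32_t)(new_idx - old_idx);
	}

For example, old_idx = 0xfffffffe, new_idx = 1, event = 0xffffffff gives
1 - 0xffffffff = 2 and 1 - 0xfffffffe = 3, so 2 < 3 and the peer is notified,
correctly, even though the indices wrapped in between.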
 
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index d3116be5a00fa51646b5a0b45683a138a4ed3f7c..300117f1a08f35c27c531d55ed0a08a1a9b81e50 100644
@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
        bool dequeued_page;
 
        dequeued_page = false;
+       spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                                continue;
                        }
 #endif
-                       spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        __count_vm_event(BALLOON_DEFLATE);
-                       spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }
+       spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
        if (!dequeued_page) {
                /*
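
The hunks above widen pages_lock from protecting only balloon_page_delete() to
covering the whole list_for_each_entry_safe() walk; previously the traversal
itself could race against balloon_page_enqueue() and page migration mutating
b_dev_info->pages. The resulting pattern, in sketch form:

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/* The list cannot change under us while pages_lock is
		 * held; per-page work proceeds as in the function above. */
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);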
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2b3c22808c3bfcd761ee6b7522354b77d31f3c9c..c7bf1aa2eeb3cb046b75100d68b23947562b409c 100755
@@ -5116,13 +5116,44 @@ sub process {
                        }
                }
 # check for memory barriers without a comment.
-               if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
+
+               my $barriers = qr{
+                       mb|
+                       rmb|
+                       wmb|
+                       read_barrier_depends
+               }x;
+               my $barrier_stems = qr{
+                       mb__before_atomic|
+                       mb__after_atomic|
+                       store_release|
+                       load_acquire|
+                       store_mb|
+                       (?:$barriers)
+               }x;
+               my $all_barriers = qr{
+                       (?:$barriers)|
+                       smp_(?:$barrier_stems)|
+                       virt_(?:$barrier_stems)
+               }x;
+
+               if ($line =~ /\b(?:$all_barriers)\s*\(/) {
                        if (!ctx_has_comment($first_line, $linenr)) {
                                WARN("MEMORY_BARRIER",
                                     "memory barrier without comment\n" . $herecurr);
                        }
                }
 
+               my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x;
+
+               if ($realfile !~ m@^include/asm-generic/@ &&
+                   $realfile !~ m@/barrier\.h$@ &&
+                   $line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ &&
+                   $line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) {
+                       WARN("MEMORY_BARRIER",
+                            "__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr);
+               }
+
 # check for waitqueue_active without a comment.
                if ($line =~ /\bwaitqueue_active\s*\(/) {
                        if (!ctx_has_comment($first_line, $linenr)) {
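
For reference, a sketch of C code the extended checks now catch: the first two
calls warn if written without an accompanying comment (the regex now covers
the smp_ and virt_ families and the acquire/release stems), and the last warns
anywhere outside barrier.h and include/asm-generic/:

	virt_mb();			/* matched via virt_($barrier_stems) */
	smp_store_release(&x, 1);	/* matched via smp_($barrier_stems)  */
	__smp_wmb();			/* "__smp memory barriers shouldn't
					 *  be used outside barrier.h and
					 *  asm-generic" */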