]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/boost/atomic/detail/ops_gcc_ppc_common.hpp
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / boost / boost / atomic / detail / ops_gcc_ppc_common.hpp
CommitLineData
b32b8144
FG
1/*
2 * Distributed under the Boost Software License, Version 1.0.
3 * (See accompanying file LICENSE_1_0.txt or copy at
4 * http://www.boost.org/LICENSE_1_0.txt)
5 *
6 * Copyright (c) 2009 Helge Bahmann
7 * Copyright (c) 2013 Tim Blechmann
8 * Copyright (c) 2014 Andrey Semashev
9 */
10/*!
11 * \file atomic/detail/ops_gcc_ppc_common.hpp
12 *
13 * This header contains basic utilities for gcc PowerPC backend.
14 */
15
16#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
17#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
18
19#include <boost/memory_order.hpp>
20#include <boost/atomic/detail/config.hpp>
21
22#ifdef BOOST_HAS_PRAGMA_ONCE
23#pragma once
24#endif
25
26namespace boost {
27namespace atomics {
28namespace detail {
29
// The implementation below uses information from this document:
// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html

// A note about memory_order_consume. Technically, this architecture makes it possible
// to avoid an unnecessary memory barrier after a consume load, since it supports data
// dependency ordering. However, some compiler optimizations may break seemingly valid
// code that relies on data dependency tracking, by injecting bogus branches to aid out
// of order execution. This may happen not only in Boost.Atomic code but also in the
// user's code, which we have no control over. See this thread:
// http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.
40
41struct gcc_ppc_operations_base
42{
11fdf7f2 43 static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
b32b8144
FG
44 static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
45
46 static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
47 {
48#if defined(__powerpc64__) || defined(__PPC64__)
49 if (order == memory_order_seq_cst)
50 __asm__ __volatile__ ("sync" ::: "memory");
11fdf7f2 51 else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
b32b8144
FG
52 __asm__ __volatile__ ("lwsync" ::: "memory");
53#else
11fdf7f2 54 if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
b32b8144
FG
55 __asm__ __volatile__ ("sync" ::: "memory");
56#endif
57 }
58
59 static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
60 {
11fdf7f2 61 if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
b32b8144
FG
62 __asm__ __volatile__ ("isync" ::: "memory");
63 }
64};
65
66} // namespace detail
67} // namespace atomics
68} // namespace boost
69
70#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_