/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_gcc_arm_common.hpp
 *
 * This header contains basic utilities for the gcc ARM backend.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_

#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

// A memory barrier is effected using a "co-processor 15" instruction,
// though a separate assembler mnemonic is available for it in v7.
//
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
// doesn't include all instructions, and in particular it doesn't include the co-processor
// instruction used for the memory barrier or the load-locked/store-conditional
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
// asm blocks with code that temporarily switches to ARM mode.
//
// You can only change between ARM and Thumb modes when branching with the bx instruction.
// bx takes an address specified in a register. The least significant bit of the address
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
// A temporary register is needed for the address and is passed as an argument to these
// macros. It must be one of the "low" registers accessible to Thumb code, specified
// using the "l" constraint in the asm statement. An illustrative usage sketch follows
// the macro definitions below.
//
// Architecture v7 introduces "Thumb 2", which includes (almost?) all of the ARM
// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
// "Thumb 2" mode, but its architecture manual is no longer available, so we refer to v7
// instead.) So in v7 we don't need to change to ARM mode; we can write "universal
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
// so they can always be present.

// A note about memory_order_consume. Technically, this architecture allows us to avoid an
// unnecessary memory barrier after a consume load, since it supports data dependency ordering.
// However, some compiler optimizations may break seemingly valid code that relies on data
// dependency tracking by injecting bogus branches to aid out-of-order execution. This may
// happen not only in Boost.Atomic code but also in the user's code, which we have no control
// over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.

#if defined(__thumb__) && !defined(__thumb2__)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
#else
// The tmpreg may be wasted in this case, which is non-optimal.
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
#endif

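// Illustrative sketch only (not part of this header): a backend such as ops_gcc_arm.hpp is
// expected to wrap its exclusive load/store loops with the macros above roughly as follows.
// Names like storage_type, storage, v and original are placeholders taken from the backend's
// context; this is not the exact backend code.
//
//   storage_type original;
//   uint32_t tmp;
//   __asm__ __volatile__
//   (
//       BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])  // switch to ARM mode if compiling for Thumb 1
//       "1:\n"
//       "ldrex %[original], %[storage]\n"          // load-exclusive the current value
//       "strex %[tmp], %[value], %[storage]\n"     // try to store v; tmp != 0 if the store failed
//       "teq   %[tmp], #0\n"
//       "bne   1b\n"
//       BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])    // switch back to Thumb mode if needed
//       : [tmp] BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), [original] "=&r" (original), [storage] "+Q" (storage)
//       : [value] "r" (v)
//       : "cc", "memory"
//   );
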
struct gcc_arm_operations_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
    {
        // The release bit is set in memory_order_release, memory_order_acq_rel and memory_order_seq_cst
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
    {
        // Consume is promoted to acquire (see the note above); the acquire bit is also set in acq_rel and seq_cst
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
    {
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
        // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments to the "dmb" instruction, such as "ish" or "#11".
        // As a workaround we have to inject the encoded bytes of the instruction. There are two encodings of the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
        // Since we cannot detect the binutils version at compile time, we always have to use this hack.
        __asm__ __volatile__
        (
#if defined(__thumb2__)
            ".short 0xF3BF, 0x8F5B\n" // dmb ish
#else
            ".word 0xF57FF05B\n" // dmb ish
#endif
            :
            :
            : "memory"
        );
#else
        // Fallback: issue the barrier through the co-processor 15 "data memory barrier" operation mentioned at the top of this header
        uint32_t tmp;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
            "mcr\tp15, 0, r0, c7, c10, 5\n"
            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
            : "=&l" (tmp)
            :
            : "memory"
        );
#endif
    }
};
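
// For reference only: backends derive from gcc_arm_operations_base (e.g. in ops_gcc_arm.hpp)
// and combine the fences above with the actual memory access roughly like this sketch
// (storage_type, storage and v are placeholder names from the backend's context):
//
//   static void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
//   {
//       fence_before(order);      // full barrier before the store for release/acq_rel/seq_cst
//       storage = v;
//       fence_after_store(order); // trailing barrier only for seq_cst
//   }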

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_