/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"

#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
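/*
 * Take the spinlock with an atomic xchg: swap 1 into sl->locked and, if
 * the old value was non-zero, spin with "pause" until the lock reads 0,
 * then retry the exchange.
 */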
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"
			"test %[lv], %[lv]\n"
			"jz 3f\n"
			"2:\n"
			"pause\n"
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"
			"jmp 1b\n"
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}
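/*
 * Release the spinlock by atomically exchanging 0 into sl->locked;
 * the implicitly locked xchg also acts as a full memory barrier.
 */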
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}
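/*
 * Try to take the lock with a single atomic exchange, without ever
 * spinning: returns 1 if the lock was free (old value 0), 0 otherwise.
 */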
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;
}
#endif
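/*
 * Non-zero when the CPU supports Intel TSX/RTM; the flag is expected to
 * be set once at startup from the CPUID feature bits (see rte_cpuflags.h).
 */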
extern uint8_t rte_rtm_supported;

static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}
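/*
 * Try to run the critical section as an RTM transaction instead of
 * taking the lock. Up to RTE_RTM_MAX_RETRIES transactions are attempted;
 * a transaction that observes the lock already held aborts itself with
 * RTE_XABORT_LOCK_BUSY and waits for the holder to release the lock
 * before retrying. Returns 1 if a transaction was started, 0 if the
 * caller must fall back to really acquiring the lock.
 */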
static inline int
rte_try_tm(volatile int *lock)
{
	if (!rte_rtm_supported)
		return 0;

	int retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_EXPLICIT) &&
			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
			continue;

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}
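/*
 * Lock elision entry point: on RTM-capable CPUs the critical section runs
 * as a transaction and the lock word itself stays free; otherwise the
 * plain spinlock is taken. Illustrative usage (a sketch, not part of this
 * header; RTE_SPINLOCK_INITIALIZER comes from generic/rte_spinlock.h):
 *
 *	rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
 *
 *	rte_spinlock_lock_tm(&lock);
 *	... critical section, elided when RTM is available ...
 *	rte_spinlock_unlock_tm(&lock);
 */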
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}
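/*
 * Non-blocking variant of rte_spinlock_lock_tm(); returns 1 if a
 * transaction was started or the lock was acquired, 0 otherwise.
 */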
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}
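/*
 * If sl->locked is set we are on the fall-back path and must really
 * release the lock; otherwise we are inside a transaction and commit
 * it with rte_xend().
 */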
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		rte_xend();
}
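/*
 * Recursive-lock counterpart of rte_spinlock_lock_tm(): elide the inner
 * spinlock when RTM is available, otherwise take it recursively.
 */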
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}
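/*
 * Commit the transaction, or really release the recursive lock if the
 * elision fall-back was taken.
 */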
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}
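/*
 * Non-blocking recursive variant; returns 1 on a started transaction or
 * a successfully taken lock, 0 otherwise.
 */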
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}
#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */