/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_H_
#define _RTE_SPINLOCK_H_

/**
 * @file
 *
 * RTE Spinlocks
 *
 * This file defines an API for spinlocks, which are implemented in an
 * architecture-specific way. This kind of lock simply waits in a loop,
 * repeatedly checking until the lock becomes available.
 *
 * All locks must be initialised before use, and only initialised once.
 *
 */

#include <rte_lcore.h>
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
#include <rte_pause.h>

/**
 * The rte_spinlock_t type.
 */
typedef struct {
        volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }

/**
 * Initialize the spinlock to an unlocked state.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_init(rte_spinlock_t *sl)
{
        sl->locked = 0;
}

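/*
 * A minimal usage sketch (the variable and function names below are
 * illustrative, not part of this API): a lock may be initialised statically
 * with RTE_SPINLOCK_INITIALIZER, or at run time with rte_spinlock_init().
 *
 *   static rte_spinlock_t stats_lock = RTE_SPINLOCK_INITIALIZER;
 *
 *   static rte_spinlock_t dyn_lock;
 *
 *   static void setup(void)
 *   {
 *           rte_spinlock_init(&dyn_lock);
 *   }
 */
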
/**
 * Take the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
        int exp = 0;

        while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                /* The lock is held: spin on a relaxed load until it looks
                 * free, then retry the acquiring compare-and-swap
                 * (test-and-test-and-set).
                 */
                while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
                        rte_pause();
                exp = 0;
        }
}
#endif

/**
 * Release the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
        __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
#endif

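/*
 * A minimal sketch of the usual lock/unlock pairing around a critical
 * section; the counter and its lock are illustrative only.
 *
 *   static rte_spinlock_t cnt_lock = RTE_SPINLOCK_INITIALIZER;
 *   static uint64_t cnt;
 *
 *   static void cnt_inc(void)
 *   {
 *           rte_spinlock_lock(&cnt_lock);
 *           cnt++;
 *           rte_spinlock_unlock(&cnt_lock);
 *   }
 */
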
/**
 * Try to take the lock.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
        int exp = 0;
        return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
                        0, /* disallow spurious failure */
                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
#endif

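/*
 * A sketch of the non-blocking pattern, reusing the illustrative counter
 * above: attempt the lock and report failure when it is already held,
 * instead of spinning.
 *
 *   static int cnt_try_inc(void)
 *   {
 *           if (rte_spinlock_trylock(&cnt_lock) == 0)
 *                   return 0;
 *           cnt++;
 *           rte_spinlock_unlock(&cnt_lock);
 *           return 1;
 *   }
 *
 * A return of 0 tells the caller the lock was busy and nothing was done.
 */
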
/**
 * Test if the lock is taken.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int rte_spinlock_is_locked(rte_spinlock_t *sl)
{
        return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
}

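/*
 * A sketch of a debug-style check, assuming rte_panic() from <rte_debug.h>
 * is available: verify that a lock believed to protect some state is indeed
 * held. Note the test only reports that some thread holds the lock, not
 * which one.
 *
 *   static void assert_cnt_locked(void)
 *   {
 *           if (!rte_spinlock_is_locked(&cnt_lock))
 *                   rte_panic("cnt_lock expected to be held\n");
 *   }
 */
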
/**
 * Test if hardware transactional memory (lock elision) is supported.
 *
 * @return
 *   1 if hardware transactional memory is supported; 0 otherwise.
 */
static inline int rte_tm_supported(void);

/**
 * Try to execute the critical section in a hardware memory transaction; if
 * that fails, or if transactional memory is not available, take the spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction, since the CPU is unable to
 * roll the I/O back should the transaction fail. Therefore, hardware
 * transactional locks should not be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl);

/**
 * Commit the hardware memory transaction, or release the spinlock if the
 * spinlock was used as a fall-back.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl);

/**
 * Try to execute the critical section in a hardware memory transaction; if
 * that fails, or if transactional memory is not available, try to take the
 * lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction, since the CPU is unable to
 * roll the I/O back should the transaction fail. Therefore, hardware
 * transactional locks should not be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl);

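/*
 * A minimal sketch of the elision variants, reusing the illustrative counter
 * above. The _tm calls first try to run the critical section as a hardware
 * memory transaction and transparently fall back to the plain spinlock when
 * HTM is unsupported or the transaction aborts, so the calling code is the
 * same either way.
 *
 *   static void cnt_inc_tm(void)
 *   {
 *           rte_spinlock_lock_tm(&cnt_lock);
 *           cnt++;
 *           rte_spinlock_unlock_tm(&cnt_lock);
 *   }
 */
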
/**
 * The rte_spinlock_recursive_t type.
 */
typedef struct {
        rte_spinlock_t sl; /**< the actual spinlock */
        volatile int user; /**< id of the thread holding the lock, -1 for unused */
        volatile int count; /**< count of times this lock has been taken */
} rte_spinlock_recursive_t;

/**
 * A static recursive spinlock initializer.
 */
#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}

/**
 * Initialize the recursive spinlock to an unlocked state.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
{
        rte_spinlock_init(&slr->sl);
        slr->user = -1;
        slr->count = 0;
}

/**
 * Take the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
{
        int id = rte_gettid();

        if (slr->user != id) {
                rte_spinlock_lock(&slr->sl);
                slr->user = id;
        }
        slr->count++;
}

/**
 * Release the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
{
        if (--(slr->count) == 0) {
                slr->user = -1;
                rte_spinlock_unlock(&slr->sl);
        }
}

/**
 * Try to take the recursive lock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
{
        int id = rte_gettid();

        if (slr->user != id) {
                if (rte_spinlock_trylock(&slr->sl) == 0)
                        return 0;
                slr->user = id;
        }
        slr->count++;
        return 1;
}

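/*
 * A minimal sketch of recursive locking, assuming helper() can be reached
 * both with and without the lock already held by the calling thread; the
 * names are illustrative only.
 *
 *   static rte_spinlock_recursive_t rlock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *
 *   static void helper(void)
 *   {
 *           rte_spinlock_recursive_lock(&rlock);
 *           ... touch shared state ...
 *           rte_spinlock_recursive_unlock(&rlock);
 *   }
 *
 *   static void caller(void)
 *   {
 *           rte_spinlock_recursive_lock(&rlock);
 *           helper();
 *           rte_spinlock_recursive_unlock(&rlock);
 *   }
 *
 * The nested lock call in helper() only increments the recursion count; the
 * underlying spinlock is released when the outermost unlock is reached.
 */
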
/**
 * Try to execute the critical section in a hardware memory transaction; if
 * that fails, or if transactional memory is not available, take the
 * recursive spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction, since the CPU is unable to
 * roll the I/O back should the transaction fail. Therefore, hardware
 * transactional locks should not be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock_tm(
        rte_spinlock_recursive_t *slr);

/**
 * Commit the hardware memory transaction, or release the recursive spinlock
 * if the recursive spinlock was used as a fall-back.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock_tm(
        rte_spinlock_recursive_t *slr);

/**
 * Try to execute the critical section in a hardware memory transaction; if
 * that fails, or if transactional memory is not available, try to take the
 * recursive lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction, since the CPU is unable to
 * roll the I/O back should the transaction fail. Therefore, hardware
 * transactional locks should not be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
static inline int rte_spinlock_recursive_trylock_tm(
        rte_spinlock_recursive_t *slr);

#endif /* _RTE_SPINLOCK_H_ */