]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/lib/librte_eal/common/include/generic/rte_spinlock.h
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / seastar / dpdk / lib / librte_eal / common / include / generic / rte_spinlock.h
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #ifndef _RTE_SPINLOCK_H_
35 #define _RTE_SPINLOCK_H_
36
37 /**
38 * @file
39 *
40 * RTE Spinlocks
41 *
42  * This file defines an API for spinlocks, which are implemented
43 * in an architecture-specific way. This kind of lock simply waits in
44 * a loop repeatedly checking until the lock becomes available.
45 *
46 * All locks must be initialised before use, and only initialised once.
47 *
48 */
49
50 #include <rte_lcore.h>
51 #ifdef RTE_FORCE_INTRINSICS
52 #include <rte_common.h>
53 #endif
54
55 /**
56 * The rte_spinlock_t type.
57 */
58 typedef struct {
59 volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
60 } rte_spinlock_t;
61
62 /**
63 * A static spinlock initializer.
64 */
65 #define RTE_SPINLOCK_INITIALIZER { 0 }
66
67 /**
68 * Initialize the spinlock to an unlocked state.
69 *
70 * @param sl
71 * A pointer to the spinlock.
72 */
73 static inline void
74 rte_spinlock_init(rte_spinlock_t *sl)
75 {
76 sl->locked = 0;
77 }
78
79 /**
80 * Take the spinlock.
81 *
82 * @param sl
83 * A pointer to the spinlock.
84 */
85 static inline void
86 rte_spinlock_lock(rte_spinlock_t *sl);
87
88 #ifdef RTE_FORCE_INTRINSICS
89 static inline void
90 rte_spinlock_lock(rte_spinlock_t *sl)
91 {
92 while (__sync_lock_test_and_set(&sl->locked, 1))
93 while(sl->locked)
94 rte_pause();
95 }
96 #endif
97
98 /**
99 * Release the spinlock.
100 *
101 * @param sl
102 * A pointer to the spinlock.
103 */
104 static inline void
105 rte_spinlock_unlock (rte_spinlock_t *sl);
106
107 #ifdef RTE_FORCE_INTRINSICS
108 static inline void
109 rte_spinlock_unlock (rte_spinlock_t *sl)
110 {
111 __sync_lock_release(&sl->locked);
112 }
113 #endif
114
115 /**
116 * Try to take the lock.
117 *
118 * @param sl
119 * A pointer to the spinlock.
120 * @return
121 * 1 if the lock is successfully taken; 0 otherwise.
122 */
123 static inline int
124 rte_spinlock_trylock (rte_spinlock_t *sl);
125
126 #ifdef RTE_FORCE_INTRINSICS
127 static inline int
128 rte_spinlock_trylock (rte_spinlock_t *sl)
129 {
130 return __sync_lock_test_and_set(&sl->locked,1) == 0;
131 }
132 #endif
133
134 /**
135 * Test if the lock is taken.
136 *
137 * @param sl
138 * A pointer to the spinlock.
139 * @return
140 * 1 if the lock is currently taken; 0 otherwise.
141 */
142 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
143 {
144 return sl->locked;
145 }
146
147 /**
148 * Test if hardware transactional memory (lock elision) is supported
149 *
150 * @return
151 * 1 if the hardware transactional memory is supported; 0 otherwise.
152 */
153 static inline int rte_tm_supported(void);
154
155 /**
156 * Try to execute critical section in a hardware memory transaction,
157 * if it fails or not available take the spinlock.
158 *
159 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
160 * transaction always aborts the transaction since the CPU is not able to
161 * roll-back should the transaction fail. Therefore, hardware transactional
162 * locks are not advised to be used around rte_eth_rx_burst() and
163 * rte_eth_tx_burst() calls.
164 *
165 * @param sl
166 * A pointer to the spinlock.
167 */
168 static inline void
169 rte_spinlock_lock_tm(rte_spinlock_t *sl);
170
171 /**
172 * Commit hardware memory transaction or release the spinlock if
173 * the spinlock is used as a fall-back
174 *
175 * @param sl
176 * A pointer to the spinlock.
177 */
178 static inline void
179 rte_spinlock_unlock_tm(rte_spinlock_t *sl);
180
181 /**
182 * Try to execute critical section in a hardware memory transaction,
183 * if it fails or not available try to take the lock.
184 *
185 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
186 * transaction always aborts the transaction since the CPU is not able to
187 * roll-back should the transaction fail. Therefore, hardware transactional
188 * locks are not advised to be used around rte_eth_rx_burst() and
189 * rte_eth_tx_burst() calls.
190 *
191 * @param sl
192 * A pointer to the spinlock.
193 * @return
194 * 1 if the hardware memory transaction is successfully started
195 * or lock is successfully taken; 0 otherwise.
196 */
197 static inline int
198 rte_spinlock_trylock_tm(rte_spinlock_t *sl);
199
200 /**
201 * The rte_spinlock_recursive_t type.
202 */
203 typedef struct {
204 rte_spinlock_t sl; /**< the actual spinlock */
205 volatile int user; /**< core id using lock, -1 for unused */
206 volatile int count; /**< count of time this lock has been called */
207 } rte_spinlock_recursive_t;
208
209 /**
210 * A static recursive spinlock initializer.
211 */
212 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
213
214 /**
215 * Initialize the recursive spinlock to an unlocked state.
216 *
217 * @param slr
218 * A pointer to the recursive spinlock.
219 */
220 static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
221 {
222 rte_spinlock_init(&slr->sl);
223 slr->user = -1;
224 slr->count = 0;
225 }
226
227 /**
228 * Take the recursive spinlock.
229 *
230 * @param slr
231 * A pointer to the recursive spinlock.
232 */
233 static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
234 {
235 int id = rte_gettid();
236
237 if (slr->user != id) {
238 rte_spinlock_lock(&slr->sl);
239 slr->user = id;
240 }
241 slr->count++;
242 }
243 /**
244 * Release the recursive spinlock.
245 *
246 * @param slr
247 * A pointer to the recursive spinlock.
248 */
249 static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
250 {
251 if (--(slr->count) == 0) {
252 slr->user = -1;
253 rte_spinlock_unlock(&slr->sl);
254 }
255
256 }
257
258 /**
259 * Try to take the recursive lock.
260 *
261 * @param slr
262 * A pointer to the recursive spinlock.
263 * @return
264 * 1 if the lock is successfully taken; 0 otherwise.
265 */
266 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
267 {
268 int id = rte_gettid();
269
270 if (slr->user != id) {
271 if (rte_spinlock_trylock(&slr->sl) == 0)
272 return 0;
273 slr->user = id;
274 }
275 slr->count++;
276 return 1;
277 }
278
279
280 /**
281 * Try to execute critical section in a hardware memory transaction,
282 * if it fails or not available take the recursive spinlocks
283 *
284 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
285 * transaction always aborts the transaction since the CPU is not able to
286 * roll-back should the transaction fail. Therefore, hardware transactional
287 * locks are not advised to be used around rte_eth_rx_burst() and
288 * rte_eth_tx_burst() calls.
289 *
290 * @param slr
291 * A pointer to the recursive spinlock.
292 */
293 static inline void rte_spinlock_recursive_lock_tm(
294 rte_spinlock_recursive_t *slr);
295
296 /**
297 * Commit hardware memory transaction or release the recursive spinlock
298 * if the recursive spinlock is used as a fall-back
299 *
300 * @param slr
301 * A pointer to the recursive spinlock.
302 */
303 static inline void rte_spinlock_recursive_unlock_tm(
304 rte_spinlock_recursive_t *slr);
305
306 /**
307 * Try to execute critical section in a hardware memory transaction,
308 * if it fails or not available try to take the recursive lock
309 *
310 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
311 * transaction always aborts the transaction since the CPU is not able to
312 * roll-back should the transaction fail. Therefore, hardware transactional
313 * locks are not advised to be used around rte_eth_rx_burst() and
314 * rte_eth_tx_burst() calls.
315 *
316 * @param slr
317 * A pointer to the recursive spinlock.
318 * @return
319 * 1 if the hardware memory transaction is successfully started
320 * or lock is successfully taken; 0 otherwise.
321 */
322 static inline int rte_spinlock_recursive_trylock_tm(
323 rte_spinlock_recursive_t *slr);
324
325 #endif /* _RTE_SPINLOCK_H_ */