/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_TICKETLOCK_H_
#define _RTE_TICKETLOCK_H_

/**
 * @file
 *
 * RTE ticket locks
 *
 * This file defines an API for ticket locks, which give each waiting
 * thread a ticket and grant the lock in ticket order: first come,
 * first served.
 *
 * All locks must be initialized before use, and initialized only once.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_pause.h>

/**
 * The rte_ticketlock_t type.
 */
typedef union {
	uint32_t tickets;
	struct {
		uint16_t current;
		uint16_t next;
	} s;
} rte_ticketlock_t;

/**
 * A static ticketlock initializer.
 */
#define RTE_TICKETLOCK_INITIALIZER { 0 }

/**
 * Initialize the ticketlock to an unlocked state.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_init(rte_ticketlock_t *tl)
{
	__atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
}
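
/*
 * Usage sketch (illustrative only; the structure and function names below are
 * hypothetical, not part of the DPDK API). rte_ticketlock_init() is the
 * run-time counterpart of RTE_TICKETLOCK_INITIALIZER, e.g. for a lock embedded
 * in a dynamically allocated object. As experimental API, these functions
 * require ALLOW_EXPERIMENTAL_API to be defined by the caller.
 */
struct rte_ticketlock_example_stats {
	rte_ticketlock_t lock;
	uint64_t packets;
	uint64_t bytes;
};

static inline void
rte_ticketlock_example_stats_init(struct rte_ticketlock_example_stats *st)
{
	rte_ticketlock_init(&st->lock); /* start unlocked: next == current == 0 */
	st->packets = 0;
	st->bytes = 0;
}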

/**
 * Take the ticketlock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_lock(rte_ticketlock_t *tl)
{
	/* Grab the next ticket, then spin until it becomes the current one. */
	uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
	rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE);
}

/**
 * Release the ticketlock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_unlock(rte_ticketlock_t *tl)
{
	/* Advance the current ticket so the next waiter is serviced. */
	uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
	__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
}
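
/*
 * Usage sketch (illustrative only; the helper name and the stats structure
 * from the sketch above are hypothetical). Concurrent callers are serviced in
 * the order in which they took their ticket in rte_ticketlock_lock().
 */
static inline void
rte_ticketlock_example_stats_update(struct rte_ticketlock_example_stats *st,
				    uint64_t pkts, uint64_t bytes)
{
	rte_ticketlock_lock(&st->lock);   /* blocks until our ticket is current */
	st->packets += pkts;              /* critical section */
	st->bytes += bytes;
	rte_ticketlock_unlock(&st->lock); /* hands the lock to the next ticket */
}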

/**
 * Try to take the lock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_ticketlock_trylock(rte_ticketlock_t *tl)
{
	rte_ticketlock_t old, new;

	/*
	 * Only attempt the lock if it is free (next == current); the CAS
	 * fails if another thread touched the tickets in the meantime.
	 */
	old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
	new.tickets = old.tickets;
	new.s.next++;
	if (old.s.next == old.s.current) {
		if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
		    new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return 1;
	}

	return 0;
}
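
/*
 * Usage sketch (illustrative only; the helper name is hypothetical). A
 * non-blocking variant of the update above: if another thread holds the lock,
 * the work is skipped and 0 is returned instead of spinning.
 */
static inline int
rte_ticketlock_example_stats_try_update(struct rte_ticketlock_example_stats *st,
					uint64_t pkts, uint64_t bytes)
{
	if (rte_ticketlock_trylock(&st->lock) == 0)
		return 0;                 /* lock busy, caller may retry later */
	st->packets += pkts;
	st->bytes += bytes;
	rte_ticketlock_unlock(&st->lock);
	return 1;
}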

/**
 * Test if the lock is taken.
 *
 * @param tl
 *   A pointer to the ticketlock.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_ticketlock_is_locked(rte_ticketlock_t *tl)
{
	rte_ticketlock_t tic;
	tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
	return (tic.s.current != tic.s.next);
}
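
/*
 * Usage sketch (illustrative only; the helper name is hypothetical). The
 * result of rte_ticketlock_is_locked() is only a snapshot, so it is best
 * suited to diagnostics, e.g. checking that a lock is free before the data
 * it protects is torn down.
 */
static inline int
rte_ticketlock_example_stats_can_free(struct rte_ticketlock_example_stats *st)
{
	return !rte_ticketlock_is_locked(&st->lock);
}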

/**
 * The rte_ticketlock_recursive_t type.
 */
#define TICKET_LOCK_INVALID_ID -1

typedef struct {
	rte_ticketlock_t tl; /**< the actual ticketlock */
	int user; /**< owning thread id, TICKET_LOCK_INVALID_ID if unused */
	unsigned int count; /**< number of times the owner has taken the lock */
} rte_ticketlock_recursive_t;

/**
 * A static recursive ticketlock initializer.
 */
#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
					      TICKET_LOCK_INVALID_ID, 0}

/**
 * Initialize the recursive ticketlock to an unlocked state.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
{
	rte_ticketlock_init(&tlr->tl);
	__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
	tlr->count = 0;
}

/**
 * Take the recursive ticketlock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	/* Only take the underlying lock on the outermost acquisition. */
	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		rte_ticketlock_lock(&tlr->tl);
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	tlr->count++;
}

/**
 * Release the recursive ticketlock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
{
	/* Release the underlying lock only when the count drops to zero. */
	if (--(tlr->count) == 0) {
		__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
				 __ATOMIC_RELAXED);
		rte_ticketlock_unlock(&tlr->tl);
	}
}
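
/*
 * Usage sketch (illustrative only; the function names are hypothetical). The
 * recursive lock tracks its owner with rte_gettid(), so a thread that already
 * holds it may take it again: a locked helper can call another locked helper
 * without deadlocking, and the lock is released when the outermost unlock
 * brings the count back to zero.
 */
static inline void
rte_ticketlock_example_reset(rte_ticketlock_recursive_t *tlr, uint64_t *counter)
{
	rte_ticketlock_recursive_lock(tlr);
	*counter = 0;
	rte_ticketlock_recursive_unlock(tlr);
}

static inline void
rte_ticketlock_example_reload(rte_ticketlock_recursive_t *tlr,
			      uint64_t *counter, uint64_t value)
{
	rte_ticketlock_recursive_lock(tlr);          /* count: 0 -> 1 */
	rte_ticketlock_example_reset(tlr, counter);  /* re-enters: 1 -> 2 -> 1 */
	*counter = value;
	rte_ticketlock_recursive_unlock(tlr);        /* count: 1 -> 0, released */
}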

/**
 * Try to take the recursive lock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		if (rte_ticketlock_trylock(&tlr->tl) == 0)
			return 0;
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	tlr->count++;
	return 1;
}
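
/*
 * Usage sketch (illustrative only; the helper name is hypothetical). Because
 * the lock is recursive, the owning thread always succeeds here; only threads
 * that do not already hold the lock can be refused.
 */
static inline int
rte_ticketlock_example_try_reset(rte_ticketlock_recursive_t *tlr,
				 uint64_t *counter)
{
	if (rte_ticketlock_recursive_trylock(tlr) == 0)
		return 0;
	*counter = 0;
	rte_ticketlock_recursive_unlock(tlr);
	return 1;
}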

#ifdef __cplusplus
}
#endif

#endif /* _RTE_TICKETLOCK_H_ */