#ifndef QEMU_RCU_QUEUE_H
#define QEMU_RCU_QUEUE_H

/*
 * rcu_queue.h
 *
 * RCU-friendly versions of the queue.h primitives.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/queue.h"
#include "qemu/atomic.h"

/*
 * List access methods.
 */
#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL)
#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first))
#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next))

/*
 * List functions.
 */

/*
 * The difference between qatomic_read/set and qatomic_rcu_read/set
 * is the inclusion of a read/write memory barrier with the volatile
 * access: the qatomic_rcu_* macros include the memory barrier, the
 * plain qatomic_* macros do not.  Therefore, it is correct to issue
 * a series of reads or writes to the same element using only the
 * qatomic_* macros, as long as the last read or write uses a
 * qatomic_rcu_* macro to introduce the read or write memory barrier
 * as appropriate.
 */
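
/*
 * Illustrative sketch (not part of the original header): a writer can
 * initialize a new element with plain stores, as long as the final
 * store that makes the element reachable uses qatomic_rcu_set(), so
 * that readers cannot observe the pointer before the initialization
 * is complete.  The "node", "value" and "head" names below are
 * hypothetical:
 *
 *     node->value = 42;                  // plain store, not yet reachable
 *     node->next = qatomic_read(&head);  // plain atomic access
 *     qatomic_rcu_set(&head, node);      // publish: barrier, then store
 */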

/* Upon publication of the listelm->next value, list readers
 * will see the new node when following next pointers from
 * antecedent nodes, but may not see the new node when following
 * prev pointers from subsequent nodes until after the RCU grace
 * period expires.
 * see linux/include/rculist.h __list_add_rcu(new, prev, next)
 */
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do {    \
    (elm)->field.le_next = (listelm)->field.le_next;        \
    (elm)->field.le_prev = &(listelm)->field.le_next;       \
    qatomic_rcu_set(&(listelm)->field.le_next, (elm));      \
    if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =               \
            &(elm)->field.le_next;                          \
    }                                                       \
} while (/*CONSTCOND*/0)

/* Upon publication of the listelm->prev->next value, list
 * readers will see the new element when following prev pointers
 * from subsequent elements, but may not see the new element
 * when following next pointers from antecedent elements
 * until after the RCU grace period expires.
 */
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do {   \
    (elm)->field.le_prev = (listelm)->field.le_prev;        \
    (elm)->field.le_next = (listelm);                       \
    qatomic_rcu_set((listelm)->field.le_prev, (elm));       \
    (listelm)->field.le_prev = &(elm)->field.le_next;       \
} while (/*CONSTCOND*/0)

/* Upon publication of the head->first value, list readers
 * will see the new element when following the head, but may
 * not see the new element when following prev pointers from
 * subsequent elements until after the RCU grace period has
 * expired.
 */
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.le_prev = &(head)->lh_first;           \
    (elm)->field.le_next = (head)->lh_first;            \
    qatomic_rcu_set((&(head)->lh_first), (elm));        \
    if ((elm)->field.le_next != NULL) {                 \
        (elm)->field.le_next->field.le_prev =           \
            &(elm)->field.le_next;                      \
    }                                                   \
} while (/*CONSTCOND*/0)
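
/*
 * Example writer (illustrative sketch, not part of the original
 * header).  The struct, list and lock names are hypothetical, and the
 * sketch assumes QemuMutex from "qemu/thread.h" and g_new0() from
 * GLib.  Concurrent updaters are assumed to serialize on a mutex,
 * since RCU only protects readers:
 *
 *     struct Foo {
 *         int value;
 *         QLIST_ENTRY(Foo) node;
 *     };
 *     static QLIST_HEAD(, Foo) foo_list = QLIST_HEAD_INITIALIZER(foo_list);
 *     static QemuMutex foo_list_lock;
 *
 *     void foo_add(int value)
 *     {
 *         struct Foo *f = g_new0(struct Foo, 1);
 *
 *         f->value = value;
 *         qemu_mutex_lock(&foo_list_lock);
 *         QLIST_INSERT_HEAD_RCU(&foo_list, f, node);
 *         qemu_mutex_unlock(&foo_list_lock);
 *     }
 */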


/* prior to publication of the elm->prev->next value, some list
 * readers may still see the removed element when following
 * the antecedent's next pointer.
 */
#define QLIST_REMOVE_RCU(elm, field) do {                   \
    if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =               \
            (elm)->field.le_prev;                           \
    }                                                       \
    qatomic_set((elm)->field.le_prev, (elm)->field.le_next);\
} while (/*CONSTCOND*/0)
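
/*
 * Example removal (illustrative sketch continuing the hypothetical
 * "foo_list" above, not part of the original header).  A removed
 * element may still be reachable by readers that started before the
 * removal, so it must not be freed until a grace period has elapsed,
 * e.g. via synchronize_rcu(); call_rcu() or g_free_rcu() from
 * "qemu/rcu.h" are the deferred alternatives:
 *
 *     void foo_del(struct Foo *f)
 *     {
 *         qemu_mutex_lock(&foo_list_lock);
 *         QLIST_REMOVE_RCU(f, node);
 *         qemu_mutex_unlock(&foo_list_lock);
 *         synchronize_rcu();      // wait for pre-existing readers
 *         g_free(f);
 *     }
 */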

/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field)                 \
    for ((var) = qatomic_rcu_read(&(head)->lh_first);       \
         (var);                                             \
         (var) = qatomic_rcu_read(&(var)->field.le_next))

/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var)            \
    for ((var) = (qatomic_rcu_read(&(head)->lh_first));               \
         (var) &&                                                     \
             ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1);\
         (var) = (next_var))
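
/*
 * Example reader (illustrative sketch continuing the hypothetical
 * "foo_list" above, not part of the original header).  Traversal is
 * only safe inside an RCU critical section; rcu_read_lock() and
 * rcu_read_unlock() come from "qemu/rcu.h".  The _SAFE variant reads
 * the next pointer before the loop body runs, so the body may remove
 * the current element:
 *
 *     bool foo_contains(int value)
 *     {
 *         struct Foo *f;
 *         bool found = false;
 *
 *         rcu_read_lock();
 *         QLIST_FOREACH_RCU(f, &foo_list, node) {
 *             if (f->value == value) {
 *                 found = true;
 *                 break;
 *             }
 *         }
 *         rcu_read_unlock();
 *         return found;
 *     }
 */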

/*
 * RCU simple queue
 */

/* Simple queue access methods */
#define QSIMPLEQ_EMPTY_RCU(head) \
    (qatomic_read(&(head)->sqh_first) == NULL)
#define QSIMPLEQ_FIRST_RCU(head)       qatomic_rcu_read(&(head)->sqh_first)
#define QSIMPLEQ_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.sqe_next)

/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do {     \
    (elm)->field.sqe_next = (head)->sqh_first;              \
    if ((elm)->field.sqe_next == NULL) {                    \
        (head)->sqh_last = &(elm)->field.sqe_next;          \
    }                                                       \
    qatomic_rcu_set(&(head)->sqh_first, (elm));             \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do {     \
    (elm)->field.sqe_next = NULL;                           \
    qatomic_rcu_set((head)->sqh_last, (elm));               \
    (head)->sqh_last = &(elm)->field.sqe_next;              \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {   \
    (elm)->field.sqe_next = (listelm)->field.sqe_next;              \
    if ((elm)->field.sqe_next == NULL) {                            \
        (head)->sqh_last = &(elm)->field.sqe_next;                  \
    }                                                               \
    qatomic_rcu_set(&(listelm)->field.sqe_next, (elm));             \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do {                     \
    qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\
    if ((head)->sqh_first == NULL) {                                   \
        (head)->sqh_last = &(head)->sqh_first;                         \
    }                                                                  \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do {            \
    if ((head)->sqh_first == (elm)) {                               \
        QSIMPLEQ_REMOVE_HEAD_RCU((head), field);                    \
    } else {                                                        \
        struct type *curr = (head)->sqh_first;                      \
        while (curr->field.sqe_next != (elm)) {                     \
            curr = curr->field.sqe_next;                            \
        }                                                           \
        qatomic_set(&curr->field.sqe_next,                          \
                    curr->field.sqe_next->field.sqe_next);          \
        if (curr->field.sqe_next == NULL) {                         \
            (head)->sqh_last = &(curr)->field.sqe_next;             \
        }                                                           \
    }                                                               \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_FOREACH_RCU(var, head, field)              \
    for ((var) = qatomic_rcu_read(&(head)->sqh_first);      \
         (var);                                             \
         (var) = qatomic_rcu_read(&(var)->field.sqe_next))

#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next)                \
    for ((var) = qatomic_rcu_read(&(head)->sqh_first);                   \
         (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\
         (var) = (next))
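
/*
 * Example (illustrative sketch, not part of the original header): an
 * RCU-protected QSIMPLEQ with a locked producer and a lock-free
 * reader.  The names are hypothetical, and QemuMutex is assumed from
 * "qemu/thread.h":
 *
 *     struct Req {
 *         int id;
 *         QSIMPLEQ_ENTRY(Req) entry;
 *     };
 *     static QSIMPLEQ_HEAD(, Req) req_queue =
 *         QSIMPLEQ_HEAD_INITIALIZER(req_queue);
 *     static QemuMutex req_lock;
 *
 *     void req_enqueue(struct Req *r)
 *     {
 *         qemu_mutex_lock(&req_lock);
 *         QSIMPLEQ_INSERT_TAIL_RCU(&req_queue, r, entry);
 *         qemu_mutex_unlock(&req_lock);
 *     }
 *
 *     bool req_pending(int id)
 *     {
 *         struct Req *r;
 *         bool found = false;
 *
 *         rcu_read_lock();
 *         QSIMPLEQ_FOREACH_RCU(r, &req_queue, entry) {
 *             if (r->id == id) {
 *                 found = true;
 *                 break;
 *             }
 *         }
 *         rcu_read_unlock();
 *         return found;
 *     }
 */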

/*
 * RCU tail queue
 */

/* Tail queue access methods */
#define QTAILQ_EMPTY_RCU(head)       (qatomic_read(&(head)->tqh_first) == NULL)
#define QTAILQ_FIRST_RCU(head)       qatomic_rcu_read(&(head)->tqh_first)
#define QTAILQ_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.tqe_next)

/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do {           \
    (elm)->field.tqe_next = (head)->tqh_first;                  \
    if ((elm)->field.tqe_next != NULL) {                        \
        (head)->tqh_first->field.tqe_circ.tql_prev =            \
            &(elm)->field.tqe_circ;                             \
    } else {                                                    \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;     \
    }                                                           \
    qatomic_rcu_set(&(head)->tqh_first, (elm));                 \
    (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ;         \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do {             \
    (elm)->field.tqe_next = NULL;                                 \
    (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev;   \
    qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
    (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;           \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {   \
    (elm)->field.tqe_next = (listelm)->field.tqe_next;            \
    if ((elm)->field.tqe_next != NULL) {                          \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =          \
            &(elm)->field.tqe_circ;                               \
    } else {                                                      \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;       \
    }                                                             \
    qatomic_rcu_set(&(listelm)->field.tqe_next, (elm));           \
    (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ;  \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do {                \
    (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev;  \
    (elm)->field.tqe_next = (listelm);                                    \
    qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\
    (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ;          \
} while (/*CONSTCOND*/0)

#define QTAILQ_REMOVE_RCU(head, elm, field) do {                    \
    if (((elm)->field.tqe_next) != NULL) {                          \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =            \
            (elm)->field.tqe_circ.tql_prev;                         \
    } else {                                                        \
        (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
    }                                                               \
    qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next,          \
                (elm)->field.tqe_next);                             \
    (elm)->field.tqe_circ.tql_prev = NULL;                          \
} while (/*CONSTCOND*/0)

#define QTAILQ_FOREACH_RCU(var, head, field)                \
    for ((var) = qatomic_rcu_read(&(head)->tqh_first);      \
         (var);                                             \
         (var) = qatomic_rcu_read(&(var)->field.tqe_next))

#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next)                  \
    for ((var) = qatomic_rcu_read(&(head)->tqh_first);                   \
         (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\
         (var) = (next))
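
/*
 * Example (illustrative sketch, not part of the original header): the
 * tail queue variants follow the same pattern as the QLIST macros
 * above; writers serialize on a lock held by the caller, readers
 * traverse inside an RCU critical section.  Names are hypothetical:
 *
 *     struct Job {
 *         int prio;
 *         QTAILQ_ENTRY(Job) link;
 *     };
 *     static QTAILQ_HEAD(, Job) job_list = QTAILQ_HEAD_INITIALIZER(job_list);
 *
 *     // writer, with updates serialized by the caller
 *     void job_append(struct Job *j)
 *     {
 *         QTAILQ_INSERT_TAIL_RCU(&job_list, j, link);
 *     }
 *
 *     // reader
 *     int job_count(void)
 *     {
 *         struct Job *j;
 *         int n = 0;
 *
 *         rcu_read_lock();
 *         QTAILQ_FOREACH_RCU(j, &job_list, link) {
 *             n++;
 *         }
 *         rcu_read_unlock();
 *         return n;
 *     }
 */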

/*
 * RCU singly-linked list
 */

/* Singly-linked list access methods */
#define QSLIST_EMPTY_RCU(head)       (qatomic_read(&(head)->slh_first) == NULL)
#define QSLIST_FIRST_RCU(head)       qatomic_rcu_read(&(head)->slh_first)
#define QSLIST_NEXT_RCU(elm, field)  qatomic_rcu_read(&(elm)->field.sle_next)

/* Singly-linked list functions */
#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do {           \
    (elm)->field.sle_next = (head)->slh_first;                  \
    qatomic_rcu_set(&(head)->slh_first, (elm));                 \
} while (/*CONSTCOND*/0)

#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
    (elm)->field.sle_next = (listelm)->field.sle_next;          \
    qatomic_rcu_set(&(listelm)->field.sle_next, (elm));         \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_HEAD_RCU(head, field) do {                       \
    qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_RCU(head, elm, type, field) do {              \
    if ((head)->slh_first == (elm)) {                               \
        QSLIST_REMOVE_HEAD_RCU((head), field);                      \
    } else {                                                        \
        struct type *curr = (head)->slh_first;                      \
        while (curr->field.sle_next != (elm)) {                     \
            curr = curr->field.sle_next;                            \
        }                                                           \
        qatomic_set(&curr->field.sle_next,                          \
                    curr->field.sle_next->field.sle_next);          \
    }                                                               \
} while (/*CONSTCOND*/0)

#define QSLIST_FOREACH_RCU(var, head, field)                \
    for ((var) = qatomic_rcu_read(&(head)->slh_first);      \
         (var);                                             \
         (var) = qatomic_rcu_read(&(var)->field.sle_next))

#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next)                   \
    for ((var) = qatomic_rcu_read(&(head)->slh_first);                    \
         (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \
         (var) = (next))
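
/*
 * Example (illustrative sketch, not part of the original header): the
 * singly-linked list is the lightest variant; only head and after
 * insertion plus linear removal are provided.  Names are hypothetical:
 *
 *     struct Handler {
 *         void (*cb)(void *opaque);
 *         void *opaque;
 *         QSLIST_ENTRY(Handler) next;
 *     };
 *     static QSLIST_HEAD(, Handler) handlers =
 *         QSLIST_HEAD_INITIALIZER(handlers);
 *
 *     // writer, with updates serialized by the caller
 *     void handler_register(struct Handler *h)
 *     {
 *         QSLIST_INSERT_HEAD_RCU(&handlers, h, next);
 *     }
 *
 *     // readers invoke the callbacks inside an RCU critical section
 *     void handlers_run(void)
 *     {
 *         struct Handler *h;
 *
 *         rcu_read_lock();
 *         QSLIST_FOREACH_RCU(h, &handlers, next) {
 *             h->cb(h->opaque);
 *         }
 *         rcu_read_unlock();
 *     }
 */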

#endif /* QEMU_RCU_QUEUE_H */