/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef OVS_THREAD_H
#define OVS_THREAD_H 1

#include <pthread.h>
#include <stddef.h>
#include <sys/types.h>
#include "ovs-atomic.h"
#include "openvswitch/thread.h"
#include "util.h"

struct seq;

/* Poll-block()-able barrier similar to pthread_barrier_t. */
struct ovs_barrier {
    uint32_t size;            /* Number of threads to wait for. */
    atomic_count count;       /* Number of threads that have hit the barrier. */
    struct seq *seq;
};

/* Wrappers for pthread_mutexattr_*() that abort the process on any error. */
void xpthread_mutexattr_init(pthread_mutexattr_t *);
void xpthread_mutexattr_destroy(pthread_mutexattr_t *);
void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
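
/* For example, these wrappers make it easy to build a recursive mutex from
 * the raw pthread API.  Illustrative sketch only; the variable names below
 * are not part of this header:
 *
 *     pthread_mutexattr_t attr;
 *     pthread_mutex_t mutex;
 *
 *     xpthread_mutexattr_init(&attr);
 *     xpthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *     if (pthread_mutex_init(&mutex, &attr)) {
 *         abort();
 *     }
 *     xpthread_mutexattr_destroy(&attr);
 */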

/* Read-write lock.
 *
 * An ovs_rwlock does not support recursive readers, because POSIX allows
 * recursive acquisition of the read lock to deadlock when another thread is
 * waiting for the write lock.  (NetBSD does deadlock.)  glibc rwlocks in
 * their default configuration do not deadlock, but ovs_rwlock_init()
 * initializes rwlocks as non-recursive (which will deadlock) for two
 * reasons:
 *
 *     - glibc only provides fairness to writers in this mode.
 *
 *     - It's better to find bugs in the primary Open vSwitch target rather
 *       than exposing them only to porters. */
struct OVS_LOCKABLE ovs_rwlock {
    pthread_rwlock_t lock;
    const char *where;          /* NULL if and only if uninitialized. */
};

/* Initializer. */
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
#define OVS_RWLOCK_INITIALIZER \
        { PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP, "<unlocked>" }
#else
#define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, "<unlocked>" }
#endif

/* ovs_rwlock functions analogous to pthread_rwlock_*() functions.
 *
 * Most of these functions abort the process with an error message on any
 * error.  The "trylock" functions are an exception: they pass through a 0 or
 * EBUSY return value to the caller and abort on any other error. */
void ovs_rwlock_init(const struct ovs_rwlock *);
void ovs_rwlock_destroy(const struct ovs_rwlock *);
void ovs_rwlock_unlock(const struct ovs_rwlock *rwlock) OVS_RELEASES(rwlock);

/* Wrappers for pthread_rwlockattr_*() that abort the process on any error. */
void xpthread_rwlockattr_init(pthread_rwlockattr_t *);
void xpthread_rwlockattr_destroy(pthread_rwlockattr_t *);
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
void xpthread_rwlockattr_setkind_np(pthread_rwlockattr_t *, int kind);
#endif

void ovs_rwlock_wrlock_at(const struct ovs_rwlock *rwlock, const char *where)
    OVS_ACQ_WRLOCK(rwlock);
#define ovs_rwlock_wrlock(rwlock) \
        ovs_rwlock_wrlock_at(rwlock, OVS_SOURCE_LOCATOR)

int ovs_rwlock_trywrlock_at(const struct ovs_rwlock *rwlock, const char *where)
    OVS_TRY_WRLOCK(0, rwlock);
#define ovs_rwlock_trywrlock(rwlock) \
        ovs_rwlock_trywrlock_at(rwlock, OVS_SOURCE_LOCATOR)

void ovs_rwlock_rdlock_at(const struct ovs_rwlock *rwlock, const char *where)
    OVS_ACQ_RDLOCK(rwlock);
#define ovs_rwlock_rdlock(rwlock) \
        ovs_rwlock_rdlock_at(rwlock, OVS_SOURCE_LOCATOR)

int ovs_rwlock_tryrdlock_at(const struct ovs_rwlock *rwlock, const char *where)
    OVS_TRY_RDLOCK(0, rwlock);
#define ovs_rwlock_tryrdlock(rwlock) \
        ovs_rwlock_tryrdlock_at(rwlock, OVS_SOURCE_LOCATOR)
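
/* For example, a data structure protected by an ovs_rwlock might be used as
 * in the sketch below.  Illustrative only; "flow_table_rwlock" and the
 * surrounding code are not part of this header:
 *
 *     static struct ovs_rwlock flow_table_rwlock = OVS_RWLOCK_INITIALIZER;
 *
 *     ovs_rwlock_rdlock(&flow_table_rwlock);
 *     ...read-only access to the shared structure...
 *     ovs_rwlock_unlock(&flow_table_rwlock);
 *
 *     if (!ovs_rwlock_trywrlock(&flow_table_rwlock)) {
 *         ...exclusive access to the shared structure...
 *         ovs_rwlock_unlock(&flow_table_rwlock);
 *     }
 */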

/* ovs_barrier functions analogous to pthread_barrier_*() functions. */
void ovs_barrier_init(struct ovs_barrier *, uint32_t count);
void ovs_barrier_destroy(struct ovs_barrier *);
void ovs_barrier_block(struct ovs_barrier *);
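
/* For example, a group of worker threads can rendezvous before moving on to
 * the next phase of a computation.  Illustrative sketch only; "barrier" and
 * "n_workers" are not part of this header:
 *
 *     struct ovs_barrier barrier;
 *     ovs_barrier_init(&barrier, n_workers);
 *
 *     ...then, in each of the n_workers threads...
 *     ovs_barrier_block(&barrier);
 *     ...no thread continues past this point until all have blocked...
 *
 *     ...once the workers have been joined...
 *     ovs_barrier_destroy(&barrier);
 */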

/* Wrappers for pthread_cond_*() that abort the process on any error.
 *
 * Use ovs_mutex_cond_wait() to wait for a condition. */
void xpthread_cond_init(pthread_cond_t *, pthread_condattr_t *);
void xpthread_cond_destroy(pthread_cond_t *);
void xpthread_cond_signal(pthread_cond_t *);
void xpthread_cond_broadcast(pthread_cond_t *);
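
/* For example, a simple condition handoff between two threads might look
 * like the sketch below.  The names "mutex", "cond", and "ready" are
 * illustrative only; ovs_mutex_lock(), ovs_mutex_unlock(), and
 * ovs_mutex_cond_wait() come from "openvswitch/thread.h":
 *
 *     ...waiting thread...
 *     ovs_mutex_lock(&mutex);
 *     while (!ready) {
 *         ovs_mutex_cond_wait(&cond, &mutex);
 *     }
 *     ovs_mutex_unlock(&mutex);
 *
 *     ...signaling thread...
 *     ovs_mutex_lock(&mutex);
 *     ready = true;
 *     xpthread_cond_signal(&cond);
 *     ovs_mutex_unlock(&mutex);
 */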

void xpthread_key_create(pthread_key_t *, void (*destructor)(void *));
void xpthread_key_delete(pthread_key_t);
void xpthread_setspecific(pthread_key_t, const void *);

#ifndef _WIN32
void xpthread_sigmask(int, const sigset_t *, sigset_t *);
#endif

pthread_t ovs_thread_create(const char *name, void *(*)(void *), void *);
void xpthread_join(pthread_t, void **);
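
/* For example, starting a named worker thread and later waiting for it to
 * finish.  Illustrative sketch only; "worker_main" and "aux" are not part
 * of this header:
 *
 *     pthread_t worker = ovs_thread_create("worker", worker_main, aux);
 *     ...
 *     xpthread_join(worker, NULL);
 */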
\f
/* Per-thread data.
 *
 *
 * Standard Forms
 * ==============
 *
 * Multiple forms of standard per-thread data exist, each with its own pluses
 * and minuses.  In general, if one of these forms is appropriate, then it's a
 * good idea to use it:
 *
 *     - POSIX per-thread data via pthread_key_t is portable to any pthreads
 *       implementation, and allows a destructor function to be defined.  It
 *       only (directly) supports per-thread pointers, which are always
 *       initialized to NULL.  It requires once-only allocation of a
 *       pthread_key_t value.  It is relatively slow.  Typically few
 *       "pthread_key_t"s are available (POSIX requires at least 128; glibc
 *       supplies only 1024).
 *
 *     - The thread_local feature defined in C11 <threads.h> works with any
 *       data type and initializer, and it is fast.  thread_local does not
 *       require once-only initialization like pthread_key_t.  C11 does not
 *       define what happens if one attempts to access a thread_local object
 *       from a thread other than the one to which that object belongs.  There
 *       is no provision to call a user-specified destructor when a thread
 *       ends.  Typical implementations allow for an arbitrary amount of
 *       thread_local storage, but statically allocated only.
 *
 *     - The __thread keyword is a GCC extension similar to thread_local but
 *       with a longer history.  __thread is not portable to every GCC version
 *       or environment.  __thread does not restrict the use of a thread-local
 *       object outside its own thread.
 *
 * Here's a handy summary:
 *
 *                          pthread_key_t     thread_local      __thread
 *                          -------------     ------------     ----------
 * portability                  high              low             medium
 * speed                        low               high            high
 * supports destructors?        yes               no              no
 * needs key allocation?        yes               no              no
 * arbitrary initializer?       no                yes             yes
 * cross-thread access?         yes               no              yes
 * amount available?            few               arbitrary       arbitrary
 * dynamically allocated?       yes               no              no
 *
 *
 * Extensions
 * ==========
 *
 * OVS provides some extensions and wrappers:
 *
 *     - In a situation where the performance of thread_local or __thread is
 *       desirable, but portability is required, DEFINE_STATIC_PER_THREAD_DATA
 *       and DECLARE_EXTERN_PER_THREAD_DATA/DEFINE_EXTERN_PER_THREAD_DATA may
 *       be appropriate (see below).
 *
 *     - DEFINE_PER_THREAD_MALLOCED_DATA can be convenient for simple
 *       per-thread malloc()'d buffers.
 *
 *     - ovsthread_key_t provides an alternative to pthread_key_t that isn't
 *       limited to a small number of keys.
 */

/* For static data, use this macro in a source file:
 *
 *     DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, INITIALIZER).
 *
 * For global data, "declare" the data in the header and "define" it in
 * the source file, with:
 *
 *     DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME).
 *     DEFINE_EXTERN_PER_THREAD_DATA(NAME, INITIALIZER).
 *
 * One should prefer to use POSIX per-thread data, via pthread_key_t, when its
 * performance is acceptable, because of its portability (see the table
 * above).  These macros are an alternative that takes advantage of
 * thread_local (or __thread) for performance when it is available, and falls
 * back to POSIX per-thread data otherwise.
 *
 * Defines per-thread variable NAME with the given TYPE, initialized to
 * INITIALIZER (which must be valid as an initializer for a variable with
 * static lifetime).
 *
 * The public interface to the variable is:
 *
 *     TYPE *NAME_get(void)
 *     TYPE *NAME_get_unsafe(void)
 *
 *         Returns the address of this thread's instance of NAME.
 *
 *         Use NAME_get() in a context where this might be the first use of
 *         the per-thread variable in the program.  Use NAME_get_unsafe(),
 *         which avoids a conditional test and is thus slightly faster, in a
 *         context where one knows that NAME_get() has already been called
 *         previously.
 *
 * There is no "NAME_set()" (or "NAME_set_unsafe()") function.  To set the
 * value of the per-thread variable, dereference the pointer returned by
 * NAME_get() or NAME_get_unsafe(), e.g. *NAME_get() = 0.
 */
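
/* For example, a per-thread packet counter could be defined and used like
 * this.  Illustrative sketch only; "packet_count" is not part of Open
 * vSwitch:
 *
 *     DEFINE_STATIC_PER_THREAD_DATA(unsigned long long, packet_count, 0);
 *
 *     static void
 *     count_packet(void)
 *     {
 *         (*packet_count_get())++;
 *     }
 */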
#if HAVE_THREAD_LOCAL || HAVE___THREAD

#if HAVE_THREAD_LOCAL
#include <threads.h>
#elif HAVE___THREAD
#define thread_local __thread
#else
#error
#endif

#define DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, ...)                 \
    typedef TYPE NAME##_type;                                           \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        static thread_local NAME##_type var = __VA_ARGS__;             \
        return &var;                                                    \
    }                                                                   \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get(void)                                                    \
    {                                                                   \
        return NAME##_get_unsafe();                                     \
    }
#define DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME)                      \
    typedef TYPE NAME##_type;                                           \
    extern thread_local NAME##_type NAME##_var;                         \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return (NAME##_type *)&NAME##_var;                              \
    }                                                                   \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get(void)                                                    \
    {                                                                   \
        return NAME##_get_unsafe();                                     \
    }
#define DEFINE_EXTERN_PER_THREAD_DATA(NAME, ...)                        \
    thread_local NAME##_type NAME##_var = __VA_ARGS__;
#else /* no C implementation support for thread-local storage */
#define DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, ...)                  \
    typedef TYPE NAME##_type;                                           \
    static pthread_key_t NAME##_key;                                    \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return pthread_getspecific(NAME##_key);                         \
    }                                                                   \
                                                                        \
    static void                                                         \
    NAME##_once_init(void)                                              \
    {                                                                   \
        if (pthread_key_create(&NAME##_key, free)) {                    \
            abort();                                                    \
        }                                                               \
    }                                                                   \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get(void)                                                    \
    {                                                                   \
        static pthread_once_t once = PTHREAD_ONCE_INIT;                 \
        NAME##_type *value;                                             \
                                                                        \
        pthread_once(&once, NAME##_once_init);                          \
        value = NAME##_get_unsafe();                                    \
        if (!value) {                                                   \
            static const NAME##_type initial_value = __VA_ARGS__;       \
                                                                        \
            value = malloc(sizeof *value);                              \
            if (value == NULL) {                                        \
                out_of_memory();                                        \
            }                                                           \
            *value = initial_value;                                     \
            xpthread_setspecific(NAME##_key, value);                    \
        }                                                               \
        return value;                                                   \
    }
#define DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME)                      \
    typedef TYPE NAME##_type;                                           \
    static pthread_key_t NAME##_key;                                    \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return (NAME##_type *)pthread_getspecific(NAME##_key);          \
    }                                                                   \
                                                                        \
    NAME##_type *NAME##_get(void);
#define DEFINE_EXTERN_PER_THREAD_DATA(NAME, ...)                        \
    static void                                                         \
    NAME##_once_init(void)                                              \
    {                                                                   \
        if (pthread_key_create(&NAME##_key, free)) {                    \
            abort();                                                    \
        }                                                               \
    }                                                                   \
                                                                        \
    NAME##_type *                                                       \
    NAME##_get(void)                                                    \
    {                                                                   \
        static pthread_once_t once = PTHREAD_ONCE_INIT;                 \
        NAME##_type *value;                                             \
                                                                        \
        pthread_once(&once, NAME##_once_init);                          \
        value = NAME##_get_unsafe();                                    \
        if (!value) {                                                   \
            static const NAME##_type initial_value = __VA_ARGS__;       \
                                                                        \
            value = malloc(sizeof *value);                              \
            if (value == NULL) {                                        \
                out_of_memory();                                        \
            }                                                           \
            *value = initial_value;                                     \
            xpthread_setspecific(NAME##_key, value);                    \
        }                                                               \
        return value;                                                   \
    }
#endif
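
/* For example, the "extern" variants split a per-thread variable between a
 * header and a source file.  Illustrative sketch only; "last_error" is not
 * part of Open vSwitch:
 *
 *     In a header:       DECLARE_EXTERN_PER_THREAD_DATA(int, last_error);
 *     In a source file:  DEFINE_EXTERN_PER_THREAD_DATA(last_error, 0);
 *     At any use site:   *last_error_get() = errno;
 */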

/* DEFINE_PER_THREAD_MALLOCED_DATA(TYPE, NAME).
 *
 * This is a simple wrapper around POSIX per-thread data primitives.  It
 * defines per-thread variable NAME with the given TYPE, which must be a
 * pointer type.  In each thread, the per-thread variable is initialized to
 * NULL.  When a thread terminates, the variable is freed with free().
 *
 * The public interface to the variable is:
 *
 *     TYPE NAME_get(void)
 *     TYPE NAME_get_unsafe(void)
 *
 *         Returns the value of per-thread variable NAME in this thread.
 *
 *         Use NAME_get() in a context where this might be the first use of
 *         the per-thread variable in the program.  Use NAME_get_unsafe(),
 *         which avoids a conditional test and is thus slightly faster, in a
 *         context where one knows that NAME_get() has already been called
 *         previously.
 *
 *     TYPE NAME_set(TYPE new_value)
 *     TYPE NAME_set_unsafe(TYPE new_value)
 *
 *         Sets the value of per-thread variable NAME to 'new_value' in this
 *         thread, and returns its previous value.
 *
 *         Use NAME_set() in a context where this might be the first use of
 *         the per-thread variable in the program.  Use NAME_set_unsafe(),
 *         which avoids a conditional test and is thus slightly faster, in a
 *         context where one knows that NAME_set() has already been called
 *         previously.
 */
#define DEFINE_PER_THREAD_MALLOCED_DATA(TYPE, NAME)      \
    static pthread_key_t NAME##_key;                     \
                                                          \
    static void                                           \
    NAME##_once_init(void)                                \
    {                                                     \
        if (pthread_key_create(&NAME##_key, free)) {      \
            abort();                                      \
        }                                                 \
    }                                                     \
                                                          \
    static void                                           \
    NAME##_init(void)                                     \
    {                                                     \
        static pthread_once_t once = PTHREAD_ONCE_INIT;   \
        pthread_once(&once, NAME##_once_init);            \
    }                                                     \
                                                          \
    static TYPE                                           \
    NAME##_get_unsafe(void)                               \
    {                                                     \
        return pthread_getspecific(NAME##_key);           \
    }                                                     \
                                                          \
    static OVS_UNUSED TYPE                                \
    NAME##_get(void)                                      \
    {                                                     \
        NAME##_init();                                    \
        return NAME##_get_unsafe();                       \
    }                                                     \
                                                          \
    static TYPE                                           \
    NAME##_set_unsafe(TYPE value)                         \
    {                                                     \
        TYPE old_value = NAME##_get_unsafe();             \
        xpthread_setspecific(NAME##_key, value);          \
        return old_value;                                 \
    }                                                     \
                                                          \
    static OVS_UNUSED TYPE                                \
    NAME##_set(TYPE value)                                \
    {                                                     \
        NAME##_init();                                    \
        return NAME##_set_unsafe(value);                  \
    }
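
/* For example, a lazily allocated per-thread scratch buffer.  Illustrative
 * sketch only; "scratch_buf" and its size are not part of Open vSwitch:
 *
 *     DEFINE_PER_THREAD_MALLOCED_DATA(char *, scratch_buf);
 *
 *     static char *
 *     get_scratch_buf(void)
 *     {
 *         char *buf = scratch_buf_get();
 *         if (!buf) {
 *             buf = xmalloc(1024);
 *             scratch_buf_set_unsafe(buf);
 *         }
 *         return buf;
 *     }
 */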

/* Dynamically allocated thread-specific data with lots of slots.
 *
 * pthread_key_t can provide as few as 128 pieces of thread-specific data
 * (even glibc is limited to 1,024).  Thus, one must be careful to allocate
 * only a few keys globally.  One cannot, for example, allocate a key for
 * every instance of a data structure if there might be an arbitrary number
 * of those data structures.
 *
 * This API is similar to the pthread one (simply search and replace pthread_
 * by ovsthread_), but it has a much larger limit that can be raised if
 * necessary (by recompiling).  Thus, one may more freely use this form of
 * thread-specific data.
 *
 * ovsthread_key_t also differs from pthread_key_t in the following ways:
 *
 *     - Destructors must not access thread-specific data (via ovsthread_key).
 *
 *     - The pthread_key_t API allows concurrently exiting threads to start
 *       executing the destructor after pthread_key_delete() returns.  The
 *       ovsthread_key_t API guarantees that, when ovsthread_key_delete()
 *       returns, all destructors have returned and no new ones will start
 *       execution.
 */
typedef struct ovsthread_key *ovsthread_key_t;

void ovsthread_key_create(ovsthread_key_t *, void (*destructor)(void *));
void ovsthread_key_delete(ovsthread_key_t);

void ovsthread_setspecific(ovsthread_key_t, const void *);
void *ovsthread_getspecific(ovsthread_key_t);
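
/* For example, one key per instance of a data structure becomes practical
 * with this API.  Illustrative sketch only; the names below are not part of
 * this header:
 *
 *     ovsthread_key_t key;
 *
 *     ovsthread_key_create(&key, free);
 *     ovsthread_setspecific(key, xstrdup("per-thread note"));
 *     ...
 *     char *note = ovsthread_getspecific(key);
 *     ...
 *     ovsthread_key_delete(key);
 */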
\f
/* Thread ID.
 *
 * pthread_t isn't so nice for some purposes.  Its size and representation are
 * implementation dependent, which means that there is no portable way to hash
 * it.  This thread ID avoids the problem.
 */

#define OVSTHREAD_ID_UNSET UINT_MAX
DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, ovsthread_id);

/* Initializes the unique per-thread identifier. */
unsigned int ovsthread_id_init(void);

/* Returns a per-thread identifier unique within the lifetime of the
 * process. */
static inline unsigned int
ovsthread_id_self(void)
{
    unsigned int id = *ovsthread_id_get();

    if (OVS_UNLIKELY(id == OVSTHREAD_ID_UNSET)) {
        id = ovsthread_id_init();
    }

    return id;
}
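
/* For example, the identifier is suitable for hashing or for indexing
 * per-thread slots in an array.  Illustrative sketch only; "slots" and
 * "N_SLOTS" are not part of this header:
 *
 *     unsigned int id = ovsthread_id_self();
 *     slots[id % N_SLOTS].n_hits++;
 */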
\f
/* Simulated global counter.
 *
 * Incrementing such a counter is meant to be cheaper than incrementing a
 * global counter protected by a lock.  It is probably more expensive than
 * incrementing a truly thread-local variable, but such a variable has no
 * straightforward way to get the sum.
 *
 *
 * Thread-safety
 * =============
 *
 * Fully thread-safe. */

struct ovsthread_stats {
    struct ovs_mutex mutex;
    void *volatile buckets[16];
};

void ovsthread_stats_init(struct ovsthread_stats *);
void ovsthread_stats_destroy(struct ovsthread_stats *);

void *ovsthread_stats_bucket_get(struct ovsthread_stats *,
                                 void *(*new_bucket)(void));

#define OVSTHREAD_STATS_FOR_EACH_BUCKET(BUCKET, IDX, STATS)             \
    for ((IDX) = ovs_thread_stats_next_bucket(STATS, 0);                \
         ((IDX) < ARRAY_SIZE((STATS)->buckets)                          \
          ? ((BUCKET) = (STATS)->buckets[IDX], true)                    \
          : false);                                                     \
         (IDX) = ovs_thread_stats_next_bucket(STATS, (IDX) + 1))
size_t ovs_thread_stats_next_bucket(const struct ovsthread_stats *, size_t);
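
/* For example, each thread bumps its own bucket and a reader sums them.
 * Illustrative sketch only; "struct hit_counter", "stats", and
 * new_hit_counter() (a void *(*)(void) that returns a freshly allocated,
 * zeroed bucket) are not part of this header:
 *
 *     struct hit_counter { unsigned long long n; };
 *
 *     ...in each thread...
 *     struct hit_counter *c = ovsthread_stats_bucket_get(&stats,
 *                                                        new_hit_counter);
 *     c->n++;
 *
 *     ...in the reader...
 *     unsigned long long total = 0;
 *     struct hit_counter *bucket;
 *     size_t i;
 *     OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &stats) {
 *         total += bucket->n;
 *     }
 */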
\f
bool single_threaded(void);

void assert_single_threaded_at(const char *where);
#define assert_single_threaded() assert_single_threaded_at(OVS_SOURCE_LOCATOR)

#ifndef _WIN32
pid_t xfork_at(const char *where);
#define xfork() xfork_at(OVS_SOURCE_LOCATOR)
#endif

void forbid_forking(const char *reason);
bool may_fork(void);
\f
/* Useful functions related to threading. */

int count_cpu_cores(void);
bool thread_is_pmd(void);

#endif /* ovs-thread.h */