/* lib/ovs-thread.h -- thread support for Open vSwitch. */
/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #define OVS_THREAD_H 1
22 #include <sys/types.h>
23 #include "ovs-atomic.h"
24 #include "openvswitch/thread.h"
29 /* Poll-block()-able barrier similar to pthread_barrier_t. */
31 uint32_t size
; /* Number of threads to wait. */
32 atomic_count count
; /* Number of threads already hit the barrier. */
/* Wrappers for pthread_mutexattr_*() that abort the process on any error. */
void xpthread_mutexattr_init(pthread_mutexattr_t *);
void xpthread_mutexattr_destroy(pthread_mutexattr_t *);
void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
44 * An ovs_rwlock does not support recursive readers, because POSIX allows
45 * taking the reader lock recursively to deadlock when a thread is waiting on
46 * the write-lock. (NetBSD does deadlock.) glibc rwlocks in their default
47 * configuration do not deadlock, but ovs_rwlock_init() initializes rwlocks as
48 * non-recursive (which will deadlock) for two reasons:
50 * - glibc only provides fairness to writers in this mode.
52 * - It's better to find bugs in the primary Open vSwitch target rather
53 * than exposing them only to porters. */
54 struct OVS_LOCKABLE ovs_rwlock
{
55 pthread_rwlock_t lock
;
56 const char *where
; /* NULL if and only if uninitialized. */
60 #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
61 #define OVS_RWLOCK_INITIALIZER \
62 { PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP, "<unlocked>" }
64 #define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, "<unlocked>" }
67 /* ovs_rwlock functions analogous to pthread_rwlock_*() functions.
69 * Most of these functions abort the process with an error message on any
70 * error. The "trylock" functions are exception: they pass through a 0 or
71 * EBUSY return value to the caller and abort on any other error. */
72 void ovs_rwlock_init(const struct ovs_rwlock
*);
73 void ovs_rwlock_destroy(const struct ovs_rwlock
*);
74 void ovs_rwlock_unlock(const struct ovs_rwlock
*rwlock
) OVS_RELEASES(rwlock
);
76 /* Wrappers for pthread_rwlockattr_*() that abort the process on any error. */
77 void xpthread_rwlockattr_init(pthread_rwlockattr_t
*);
78 void xpthread_rwlockattr_destroy(pthread_rwlockattr_t
*);
79 #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
80 void xpthread_rwlockattr_setkind_np(pthread_rwlockattr_t
*, int kind
);
83 void ovs_rwlock_wrlock_at(const struct ovs_rwlock
*rwlock
, const char *where
)
84 OVS_ACQ_WRLOCK(rwlock
);
85 #define ovs_rwlock_wrlock(rwlock) \
86 ovs_rwlock_wrlock_at(rwlock, OVS_SOURCE_LOCATOR)
88 int ovs_rwlock_trywrlock_at(const struct ovs_rwlock
*rwlock
, const char *where
)
89 OVS_TRY_WRLOCK(0, rwlock
);
90 #define ovs_rwlock_trywrlock(rwlock) \
91 ovs_rwlock_trywrlock_at(rwlock, OVS_SOURCE_LOCATOR)
93 void ovs_rwlock_rdlock_at(const struct ovs_rwlock
*rwlock
, const char *where
)
94 OVS_ACQ_RDLOCK(rwlock
);
95 #define ovs_rwlock_rdlock(rwlock) \
96 ovs_rwlock_rdlock_at(rwlock, OVS_SOURCE_LOCATOR)
98 int ovs_rwlock_tryrdlock_at(const struct ovs_rwlock
*rwlock
, const char *where
)
99 OVS_TRY_RDLOCK(0, rwlock
);
100 #define ovs_rwlock_tryrdlock(rwlock) \
101 ovs_rwlock_tryrdlock_at(rwlock, OVS_SOURCE_LOCATOR)
/* ovs_barrier functions analogous to pthread_barrier_*() functions. */
void ovs_barrier_init(struct ovs_barrier *, uint32_t count);
void ovs_barrier_destroy(struct ovs_barrier *);
void ovs_barrier_block(struct ovs_barrier *);
108 /* Wrappers for xpthread_cond_*() that abort the process on any error.
110 * Use ovs_mutex_cond_wait() to wait for a condition. */
111 void xpthread_cond_init(pthread_cond_t
*, pthread_condattr_t
*);
112 void xpthread_cond_destroy(pthread_cond_t
*);
113 void xpthread_cond_signal(pthread_cond_t
*);
114 void xpthread_cond_broadcast(pthread_cond_t
*);
116 void xpthread_key_create(pthread_key_t
*, void (*destructor
)(void *));
117 void xpthread_key_delete(pthread_key_t
);
118 void xpthread_setspecific(pthread_key_t
, const void *);
121 void xpthread_sigmask(int, const sigset_t
*, sigset_t
*);
124 pthread_t
ovs_thread_create(const char *name
, void *(*)(void *), void *);
125 void xpthread_join(pthread_t
, void **);
/* Per-thread data.
 *
 *
 * Standard Forms
 * ==============
 *
 * Multiple forms of standard per-thread data exist, each with its own pluses
 * and minuses.  In general, if one of these forms is appropriate, then it's a
 * good idea to use it:
 *
 *     - POSIX per-thread data via pthread_key_t is portable to any pthreads
 *       implementation, and allows a destructor function to be defined.  It
 *       only (directly) supports per-thread pointers, which are always
 *       initialized to NULL.  It requires once-only allocation of a
 *       pthread_key_t value.  It is relatively slow.  Typically few
 *       "pthread_key_t"s are available (POSIX requires only at least 128,
 *       glibc supplies only 1024).
 *
 *     - The thread_local feature newly defined in C11 <threads.h> works with
 *       any data type and initializer, and it is fast.  thread_local does not
 *       require once-only initialization like pthread_key_t.  C11 does not
 *       define what happens if one attempts to access a thread_local object
 *       from a thread other than the one to which that object belongs.  There
 *       is no provision to call a user-specified destructor when a thread
 *       ends.  Typical implementations allow for an arbitrary amount of
 *       thread_local storage, but statically allocated only.
 *
 *     - The __thread keyword is a GCC extension similar to thread_local but
 *       with a longer history.  __thread is not portable to every GCC version
 *       or environment.  __thread does not restrict the use of a thread-local
 *       object outside its own thread.
 *
 * Here's a handy summary:
 *
 *                     pthread_key_t     thread_local       __thread
 *                     -------------     ------------     -------------
 * portability             high               low             medium
 * speed                    low              high               high
 * supports destructors?    yes                no                 no
 * needs key allocation?    yes                no                 no
 * arbitrary initializer?    no               yes                yes
 * cross-thread access?     yes                no                yes
 * amount available?        few          arbitrary          arbitrary
 * dynamically allocated?   yes                no                 no
 *
 *
 * Extensions
 * ==========
 *
 * OVS provides some extensions and wrappers:
 *
 *     - In a situation where the performance of thread_local or __thread is
 *       desirable, but portability is required, DEFINE_STATIC_PER_THREAD_DATA
 *       and DECLARE_EXTERN_PER_THREAD_DATA/DEFINE_EXTERN_PER_THREAD_DATA may
 *       be appropriate (see below).
 *
 *     - DEFINE_PER_THREAD_MALLOCED_DATA can be convenient for simple
 *       per-thread malloc()'d buffers.
 *
 *     - struct ovs_tsd provides an alternative to pthread_key_t that isn't
 *       limited to a small number of keys.
 */

/* For static data, use this macro in a source file:
 *
 *    DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, INITIALIZER).
 *
 * For global data, "declare" the data in the header and "define" it in
 * the source file, with:
 *
 *    DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME).
 *    DEFINE_EXTERN_PER_THREAD_DATA(NAME, INITIALIZER).
 *
 * One should prefer to use POSIX per-thread data, via pthread_key_t, when its
 * performance is acceptable, because of its portability (see the table above).
 * This macro is an alternative that takes advantage of thread_local (and
 * __thread), for its performance, when it is available, and falls back to
 * POSIX per-thread data otherwise.
 *
 * Defines per-thread variable NAME with the given TYPE, initialized to
 * INITIALIZER (which must be valid as an initializer for a static variable).
 *
 * The public interface to the variable is:
 *
 *    TYPE *NAME_get(void)
 *    TYPE *NAME_get_unsafe(void)
 *
 *       Returns the address of this thread's instance of NAME.
 *
 *       Use NAME_get() in a context where this might be the first use of the
 *       per-thread variable in the program.  Use NAME_get_unsafe(), which
 *       avoids a conditional test and is thus slightly faster, in a context
 *       where one knows that NAME_get() has already been called previously.
 *
 * There is no "NAME_set()" (or "NAME_set_unsafe()") function.  To set the
 * value of the per-thread variable, dereference the pointer returned by
 * TYPE_get() or TYPE_get_unsafe(), e.g. *TYPE_get() = 0.
 */
/* NOTE(review): many continuation lines of these macros were lost in
 * extraction (missing braces, returns, and #endif); this is a reconstruction
 * from the visible fragments -- confirm against upstream ovs-thread.h. */
#if HAVE_THREAD_LOCAL || HAVE___THREAD

#if HAVE_THREAD_LOCAL
#include <threads.h>
#elif HAVE___THREAD
#define thread_local __thread
#else
#error
#endif

#define DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, ...)                  \
    typedef TYPE NAME##_type;                                           \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        static thread_local NAME##_type var = __VA_ARGS__;              \
        return &var;                                                    \
    }                                                                   \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get(void)                                                    \
    {                                                                   \
        return NAME##_get_unsafe();                                     \
    }
#define DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME)                      \
    typedef TYPE NAME##_type;                                           \
    extern thread_local NAME##_type NAME##_var;                         \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return (NAME##_type *)&NAME##_var;                              \
    }                                                                   \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get(void)                                                    \
    {                                                                   \
        return NAME##_get_unsafe();                                     \
    }
#define DEFINE_EXTERN_PER_THREAD_DATA(NAME, ...)        \
    thread_local NAME##_type NAME##_var = __VA_ARGS__;
#else /* no C implementation support for thread-local storage */
#define DEFINE_STATIC_PER_THREAD_DATA(TYPE, NAME, ...)                  \
    typedef TYPE NAME##_type;                                           \
    static pthread_key_t NAME##_key;                                    \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return pthread_getspecific(NAME##_key);                         \
    }                                                                   \
                                                                        \
    static void                                                         \
    NAME##_once_init(void)                                              \
    {                                                                   \
        if (pthread_key_create(&NAME##_key, free)) {                    \
            abort();  /* NOTE(review): upstream may call out_of_memory() */ \
        }                                                               \
    }                                                                   \
                                                                        \
    static NAME##_type *                                                \
    NAME##_get(void)                                                    \
    {                                                                   \
        static pthread_once_t once = PTHREAD_ONCE_INIT;                 \
        NAME##_type *value;                                             \
                                                                        \
        pthread_once(&once, NAME##_once_init);                          \
        value = NAME##_get_unsafe();                                    \
        if (!value) {                                                   \
            static const NAME##_type initial_value = __VA_ARGS__;       \
                                                                        \
            value = malloc(sizeof *value);                              \
            if (value == NULL) {                                        \
                abort();                                                \
            }                                                           \
            *value = initial_value;                                     \
            xpthread_setspecific(NAME##_key, value);                    \
        }                                                               \
        return value;                                                   \
    }
#define DECLARE_EXTERN_PER_THREAD_DATA(TYPE, NAME)                      \
    typedef TYPE NAME##_type;                                           \
    static pthread_key_t NAME##_key;                                    \
                                                                        \
    static inline NAME##_type *                                         \
    NAME##_get_unsafe(void)                                             \
    {                                                                   \
        return (NAME##_type *)pthread_getspecific(NAME##_key);          \
    }                                                                   \
                                                                        \
    NAME##_type *NAME##_get(void);
#define DEFINE_EXTERN_PER_THREAD_DATA(NAME, ...)                        \
    static void                                                         \
    NAME##_once_init(void)                                              \
    {                                                                   \
        if (pthread_key_create(&NAME##_key, free)) {                    \
            abort();                                                    \
        }                                                               \
    }                                                                   \
                                                                        \
    NAME##_type *                                                       \
    NAME##_get(void)                                                    \
    {                                                                   \
        static pthread_once_t once = PTHREAD_ONCE_INIT;                 \
        NAME##_type *value;                                             \
                                                                        \
        pthread_once(&once, NAME##_once_init);                          \
        value = NAME##_get_unsafe();                                    \
        if (!value) {                                                   \
            static const NAME##_type initial_value = __VA_ARGS__;       \
                                                                        \
            value = malloc(sizeof *value);                              \
            if (value == NULL) {                                        \
                abort();                                                \
            }                                                           \
            *value = initial_value;                                     \
            xpthread_setspecific(NAME##_key, value);                    \
        }                                                               \
        return value;                                                   \
    }
#endif
/* DEFINE_PER_THREAD_MALLOCED_DATA(TYPE, NAME).
 *
 * This is a simple wrapper around POSIX per-thread data primitives.  It
 * defines per-thread variable NAME with the given TYPE, which must be a
 * pointer type.  In each thread, the per-thread variable is initialized to
 * NULL.  When a thread terminates, the variable is freed with free().
 *
 * The public interface to the variable is:
 *
 *    TYPE NAME_get(void)
 *    TYPE NAME_get_unsafe(void)
 *
 *       Returns the value of per-thread variable NAME in this thread.
 *
 *       Use NAME_get() in a context where this might be the first use of the
 *       per-thread variable in the program.  Use NAME_get_unsafe(), which
 *       avoids a conditional test and is thus slightly faster, in a context
 *       where one knows that NAME_get() has already been called previously.
 *
 *    TYPE NAME_set(TYPE new_value)
 *    TYPE NAME_set_unsafe(TYPE new_value)
 *
 *       Sets the value of per-thread variable NAME to 'new_value' in this
 *       thread, and returns its previous value.
 *
 *       Use NAME_set() in a context where this might be the first use of the
 *       per-thread variable in the program.  Use NAME_set_unsafe(), which
 *       avoids a conditional test and is thus slightly faster, in a context
 *       where one knows that NAME_set() has already been called previously.
 */
/* NOTE(review): several continuation lines were lost in extraction; the
 * _init() helper and control-flow braces below are reconstructed from the
 * visible fragments -- confirm against upstream. */
#define DEFINE_PER_THREAD_MALLOCED_DATA(TYPE, NAME)     \
    static pthread_key_t NAME##_key;                    \
                                                        \
    static void                                         \
    NAME##_once_init(void)                              \
    {                                                   \
        if (pthread_key_create(&NAME##_key, free)) {    \
            abort();                                    \
        }                                               \
    }                                                   \
                                                        \
    static void                                         \
    NAME##_init(void)                                   \
    {                                                   \
        static pthread_once_t once = PTHREAD_ONCE_INIT; \
        pthread_once(&once, NAME##_once_init);          \
    }                                                   \
                                                        \
    static TYPE                                         \
    NAME##_get_unsafe(void)                             \
    {                                                   \
        return pthread_getspecific(NAME##_key);         \
    }                                                   \
                                                        \
    static OVS_UNUSED TYPE                              \
    NAME##_get(void)                                    \
    {                                                   \
        NAME##_init();                                  \
        return NAME##_get_unsafe();                     \
    }                                                   \
                                                        \
    static TYPE                                         \
    NAME##_set_unsafe(TYPE value)                       \
    {                                                   \
        TYPE old_value = NAME##_get_unsafe();           \
        xpthread_setspecific(NAME##_key, value);        \
        return old_value;                               \
    }                                                   \
                                                        \
    static OVS_UNUSED TYPE                              \
    NAME##_set(TYPE value)                              \
    {                                                   \
        NAME##_init();                                  \
        return NAME##_set_unsafe(value);                \
    }
/* Dynamically allocated thread-specific data with lots of slots.
 *
 * pthread_key_t can provide as few as 128 pieces of thread-specific data (even
 * glibc is limited to 1,024).  Thus, one must be careful to allocate only a
 * few keys globally.  One cannot, for example, allocate a key for every
 * instance of a data structure if there might be an arbitrary number of those
 * instances.
 *
 * This API is similar to the pthread one (simply search and replace pthread_
 * by ovsthread_) but it has a much larger limit that can be raised if
 * necessary (by recompiling).  Thus, one may more freely use this form of
 * thread-specific data.
 *
 * ovsthread_key_t also differs from pthread_key_t in the following ways:
 *
 *    - Destructors must not access thread-specific data (via ovsthread_key).
 *
 *    - The pthread_key_t API allows concurrently exiting threads to start
 *      executing the destructor after pthread_key_delete() returns.  The
 *      ovsthread_key_t API guarantees that, when ovsthread_key_delete()
 *      returns, all destructors have returned and no new ones will start
 *      execution.
 */
/* Opaque key handle; struct ovsthread_key is defined in ovs-thread.c. */
typedef struct ovsthread_key *ovsthread_key_t;

void ovsthread_key_create(ovsthread_key_t *, void (*destructor)(void *));
void ovsthread_key_delete(ovsthread_key_t);

void ovsthread_setspecific(ovsthread_key_t, const void *);
void *ovsthread_getspecific(ovsthread_key_t);
458 * pthread_t isn't so nice for some purposes. Its size and representation are
459 * implementation dependent, which means that there is no way to hash it.
460 * This thread ID avoids the problem.
463 #define OVSTHREAD_ID_UNSET UINT_MAX
464 DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, ovsthread_id
);
466 /* Initializes the unique per thread identifier */
467 unsigned int ovsthread_id_init(void);
469 /* Returns a per-thread identifier unique within the lifetime of the
471 static inline unsigned int
472 ovsthread_id_self(void)
474 unsigned int id
= *ovsthread_id_get();
476 if (OVS_UNLIKELY(id
== OVSTHREAD_ID_UNSET
)) {
477 id
= ovsthread_id_init();
483 /* Simulated global counter.
485 * Incrementing such a counter is meant to be cheaper than incrementing a
486 * global counter protected by a lock. It is probably more expensive than
487 * incrementing a truly thread-local variable, but such a variable has no
488 * straightforward way to get the sum.
494 * Fully thread-safe. */
496 struct ovsthread_stats
{
497 struct ovs_mutex mutex
;
498 void *volatile buckets
[16];
501 void ovsthread_stats_init(struct ovsthread_stats
*);
502 void ovsthread_stats_destroy(struct ovsthread_stats
*);
504 void *ovsthread_stats_bucket_get(struct ovsthread_stats
*,
505 void *(*new_bucket
)(void));
/* Iterates over every in-use bucket of STATS.  The ": false" arm of the
 * conditional (lost in extraction) terminates the loop once IDX walks past
 * the last bucket. */
#define OVSTHREAD_STATS_FOR_EACH_BUCKET(BUCKET, IDX, STATS)         \
    for ((IDX) = ovs_thread_stats_next_bucket(STATS, 0);            \
         ((IDX) < ARRAY_SIZE((STATS)->buckets)                      \
          ? ((BUCKET) = (STATS)->buckets[IDX], true)                \
          : false);                                                 \
         (IDX) = ovs_thread_stats_next_bucket(STATS, (IDX) + 1))
size_t ovs_thread_stats_next_bucket(const struct ovsthread_stats *, size_t);
515 bool single_threaded(void);
517 void assert_single_threaded_at(const char *where
);
518 #define assert_single_threaded() assert_single_threaded_at(OVS_SOURCE_LOCATOR)
521 pid_t
xfork_at(const char *where
);
522 #define xfork() xfork_at(OVS_SOURCE_LOCATOR)
525 void forbid_forking(const char *reason
);
528 /* Useful functions related to threading. */
530 int count_cpu_cores(void);
531 bool thread_is_pmd(void);
533 #endif /* ovs-thread.h */