]>
Commit | Line | Data |
---|---|---|
ec68790f | 1 | /* |
e9020da2 | 2 | * Copyright (c) 2013, 2014 Nicira, Inc. |
ec68790f BP |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | * you may not use this file except in compliance with the License. | |
6 | * You may obtain a copy of the License at: | |
7 | * | |
8 | * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | * | |
10 | * Unless required by applicable law or agreed to in writing, software | |
11 | * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | * See the License for the specific language governing permissions and | |
14 | * limitations under the License. | |
15 | */ | |
16 | ||
17 | #include <config.h> | |
18 | #include "ovs-thread.h" | |
19 | #include <errno.h> | |
728a8b14 BP |
20 | #include <poll.h> |
21 | #include <stdlib.h> | |
22 | #include <unistd.h> | |
ec68790f | 23 | #include "compiler.h" |
ed27e010 | 24 | #include "hash.h" |
0f2ea848 | 25 | #include "ovs-rcu.h" |
728a8b14 BP |
26 | #include "poll-loop.h" |
27 | #include "socket-util.h" | |
ec68790f BP |
28 | #include "util.h" |
29 | ||
30 | #ifdef __CHECKER__ | |
31 | /* Omit the definitions in this file because they are somewhat difficult to | |
32 | * write without prompting "sparse" complaints, without ugliness or | |
33 | * cut-and-paste. Since "sparse" is just a checker, not a compiler, it | |
34 | * doesn't matter that we don't define them. */ | |
35 | #else | |
728a8b14 BP |
36 | #include "vlog.h" |
37 | ||
38 | VLOG_DEFINE_THIS_MODULE(ovs_thread); | |
39 | ||
40 | /* If there is a reason that we cannot fork anymore (unless the fork will be | |
41 | * immediately followed by an exec), then this points to a string that | |
42 | * explains why. */ | |
43 | static const char *must_not_fork; | |
44 | ||
45 | /* True if we created any threads beyond the main initial thread. */ | |
46 | static bool multithreaded; | |
47 | ||
97be1538 EJ |
/* Defines ovs_<TYPE>_<FUN>_at(), an abort-on-error wrapper around
 * pthread_<TYPE>_<FUN>() that also records 'where' (the caller's source file
 * and line) in the lock for debugging.  The raw pthread call cannot be
 * described to Clang's thread-safety analyzer, hence
 * OVS_NO_THREAD_SAFETY_ANALYSIS. */
#define LOCK_FUNCTION(TYPE, FUN) \
    void \
    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
                            const char *where) \
        OVS_NO_THREAD_SAFETY_ANALYSIS \
    { \
        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
        int error = pthread_##TYPE##_##FUN(&l->lock); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
        } \
        l->where = where; \
    }
LOCK_FUNCTION(mutex, lock);
LOCK_FUNCTION(rwlock, rdlock);
LOCK_FUNCTION(rwlock, wrlock);
64 | ||
/* Defines ovs_<TYPE>_<FUN>_at() for the pthread try-lock variants.  Returns 0
 * on success (recording 'where' in the lock) or EBUSY if the lock is already
 * held; any other pthread error aborts the process. */
#define TRY_LOCK_FUNCTION(TYPE, FUN) \
    int \
    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
                            const char *where) \
        OVS_NO_THREAD_SAFETY_ANALYSIS \
    { \
        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
        int error = pthread_##TYPE##_##FUN(&l->lock); \
        if (OVS_UNLIKELY(error) && error != EBUSY) { \
            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
        } \
        if (!error) { \
            l->where = where; \
        } \
        return error; \
    }
TRY_LOCK_FUNCTION(mutex, trylock);
TRY_LOCK_FUNCTION(rwlock, tryrdlock);
TRY_LOCK_FUNCTION(rwlock, trywrlock);
84 | ||
85 | #define UNLOCK_FUNCTION(TYPE, FUN) \ | |
86 | void \ | |
87 | ovs_##TYPE##_##FUN(const struct ovs_##TYPE *l_) \ | |
da203561 | 88 | OVS_NO_THREAD_SAFETY_ANALYSIS \ |
97be1538 EJ |
89 | { \ |
90 | struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \ | |
91 | int error; \ | |
92 | l->where = NULL; \ | |
93 | error = pthread_##TYPE##_##FUN(&l->lock); \ | |
94 | if (OVS_UNLIKELY(error)) { \ | |
95 | ovs_abort(error, "pthread_%s_%sfailed", #TYPE, #FUN); \ | |
96 | } \ | |
97 | } | |
98 | UNLOCK_FUNCTION(mutex, unlock); | |
99 | UNLOCK_FUNCTION(mutex, destroy); | |
100 | UNLOCK_FUNCTION(rwlock, unlock); | |
101 | UNLOCK_FUNCTION(rwlock, destroy); | |
102 | ||
ec68790f BP |
/* XPTHREAD_FUNCn(FUNCTION, ...) defines xFUNCTION(), which forwards its n
 * arguments to FUNCTION() and aborts the process if it reports an error.
 * These turn must-check pthread calls into calls that cannot fail. */
#define XPTHREAD_FUNC1(FUNCTION, PARAM1)                \
    void                                                \
    x##FUNCTION(PARAM1 arg1)                            \
    {                                                   \
        int error = FUNCTION(arg1);                     \
        if (OVS_UNLIKELY(error)) {                      \
            ovs_abort(error, "%s failed", #FUNCTION);   \
        }                                               \
    }
#define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2)        \
    void                                                \
    x##FUNCTION(PARAM1 arg1, PARAM2 arg2)               \
    {                                                   \
        int error = FUNCTION(arg1, arg2);               \
        if (OVS_UNLIKELY(error)) {                      \
            ovs_abort(error, "%s failed", #FUNCTION);   \
        }                                               \
    }
#define XPTHREAD_FUNC3(FUNCTION, PARAM1, PARAM2, PARAM3)\
    void                                                \
    x##FUNCTION(PARAM1 arg1, PARAM2 arg2, PARAM3 arg3)  \
    {                                                   \
        int error = FUNCTION(arg1, arg2, arg3);         \
        if (OVS_UNLIKELY(error)) {                      \
            ovs_abort(error, "%s failed", #FUNCTION);   \
        }                                               \
    }
ec68790f | 130 | |
13d94ee9 AW |
/* Abort-on-error "x" wrappers for the pthread functions OVS uses, generated
 * by the XPTHREAD_FUNC* macros above. */
XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *);
XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *);
XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int);
XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *);

XPTHREAD_FUNC1(pthread_rwlockattr_init, pthread_rwlockattr_t *);
XPTHREAD_FUNC1(pthread_rwlockattr_destroy, pthread_rwlockattr_t *);
/* Writer-preference rwlock kinds are a glibc extension; only wrapped where
 * available. */
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
XPTHREAD_FUNC2(pthread_rwlockattr_setkind_np, pthread_rwlockattr_t *, int);
#endif

XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *);
XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);

XPTHREAD_FUNC3(pthread_barrier_init, pthread_barrier_t *,
               pthread_barrierattr_t *, unsigned int);
XPTHREAD_FUNC1(pthread_barrier_destroy, pthread_barrier_t *);

XPTHREAD_FUNC2(pthread_join, pthread_t, void **);

typedef void destructor_func(void *);
XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *);
XPTHREAD_FUNC1(pthread_key_delete, pthread_key_t);
XPTHREAD_FUNC2(pthread_setspecific, pthread_key_t, const void *);
ec68790f | 159 | |
834d6caf BP |
/* Shared implementation of the ovs_mutex_init*() functions: initializes 'l_'
 * as a mutex of pthread mutex 'type' (e.g. PTHREAD_MUTEX_ERRORCHECK),
 * aborting the process on failure.  The attribute object is only needed for
 * the duration of pthread_mutex_init(). */
static void
ovs_mutex_init__(const struct ovs_mutex *l_, int type)
{
    struct ovs_mutex *l = CONST_CAST(struct ovs_mutex *, l_);
    pthread_mutexattr_t attr;
    int error;

    l->where = NULL;
    xpthread_mutexattr_init(&attr);
    xpthread_mutexattr_settype(&attr, type);
    error = pthread_mutex_init(&l->lock, &attr);
    if (OVS_UNLIKELY(error)) {
        ovs_abort(error, "pthread_mutex_init failed");
    }
    xpthread_mutexattr_destroy(&attr);
}
176 | ||
834d6caf BP |
/* Initializes 'mutex' as a normal (non-recursive) mutex.  Uses the
 * error-checking type so that misuse (e.g. relocking from the same thread) is
 * reported as an error rather than silently deadlocking. */
void
ovs_mutex_init(const struct ovs_mutex *mutex)
{
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_ERRORCHECK);
}

/* Initializes 'mutex' as a recursive mutex. */
void
ovs_mutex_init_recursive(const struct ovs_mutex *mutex)
{
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_RECURSIVE);
}
190 | ||
ea6f3f9a JR |
/* Initializes 'mutex' as an adaptive mutex where the platform supports it;
 * otherwise falls back to a normal (non-recursive) mutex via ovs_mutex_init().
 * (The previous comment said "recursive", a copy-paste error.) */
void
ovs_mutex_init_adaptive(const struct ovs_mutex *mutex)
{
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_ADAPTIVE_NP);
#else
    ovs_mutex_init(mutex);
#endif
}
201 | ||
97be1538 EJ |
202 | void |
203 | ovs_rwlock_init(const struct ovs_rwlock *l_) | |
204 | { | |
205 | struct ovs_rwlock *l = CONST_CAST(struct ovs_rwlock *, l_); | |
6b59b543 | 206 | pthread_rwlockattr_t attr; |
97be1538 EJ |
207 | int error; |
208 | ||
209 | l->where = NULL; | |
6b59b543 BP |
210 | |
211 | xpthread_rwlockattr_init(&attr); | |
212 | #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP | |
213 | xpthread_rwlockattr_setkind_np( | |
214 | &attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); | |
215 | #endif | |
97be1538 EJ |
216 | error = pthread_rwlock_init(&l->lock, NULL); |
217 | if (OVS_UNLIKELY(error)) { | |
218 | ovs_abort(error, "pthread_rwlock_init failed"); | |
219 | } | |
6b59b543 | 220 | xpthread_rwlockattr_destroy(&attr); |
97be1538 EJ |
221 | } |
222 | ||
/* Waits on 'cond' with 'mutex_' held, like pthread_cond_wait().  The wait is
 * bracketed with ovsrcu_quiesce_start()/end() so that a thread blocked here
 * does not hold up RCU grace periods.  Aborts the process on pthread error. */
void
ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
{
    struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
    int error;

    ovsrcu_quiesce_start();
    error = pthread_cond_wait(cond, &mutex->lock);
    ovsrcu_quiesce_end();

    if (OVS_UNLIKELY(error)) {
        ovs_abort(error, "pthread_cond_wait failed");
    }
}
f0e4e85d JS |
237 | |
238 | int | |
239 | xpthread_barrier_wait(pthread_barrier_t *barrier) | |
240 | { | |
241 | int error; | |
242 | ||
243 | error = pthread_barrier_wait(barrier); | |
244 | if (error && OVS_UNLIKELY(error != PTHREAD_BARRIER_SERIAL_THREAD)) { | |
245 | ovs_abort(error, "pthread_barrier_wait failed"); | |
246 | } | |
247 | ||
248 | return error; | |
249 | } | |
6878fada BP |
250 | \f |
/* Per-thread numeric ID; 0 in the initial thread, assigned a unique nonzero
 * value in each spawned thread by ovsthread_wrapper(). */
DEFINE_EXTERN_PER_THREAD_DATA(ovsthread_id, 0);

/* Heap-allocated bundle that carries a new thread's real start function and
 * argument from xpthread_create() into ovsthread_wrapper(), which frees it. */
struct ovsthread_aux {
    void *(*start)(void *);
    void *arg;
};
257 | ||
/* Entry point for every thread started via xpthread_create(): assigns the
 * thread a unique nonzero ovsthread_id, frees the 'ovsthread_aux' bundle,
 * ends the RCU-quiescent state that new threads begin in, then runs the
 * user's start function. */
static void *
ovsthread_wrapper(void *aux_)
{
    static atomic_uint next_id = ATOMIC_VAR_INIT(1);

    struct ovsthread_aux *auxp = aux_;
    struct ovsthread_aux aux;
    unsigned int id;

    /* Atomic fetch-and-add yields a process-unique id for this thread. */
    atomic_add(&next_id, 1, &id);
    *ovsthread_id_get() = id;

    /* Copy out and free the heap bundle before running user code. */
    aux = *auxp;
    free(auxp);

    ovsrcu_quiesce_end();
    return aux.start(aux.arg);
}
97be1538 | 276 | |
ec68790f BP |
/* Starts a new thread running 'start(arg)', via ovsthread_wrapper() (which
 * assigns the thread an ovsthread_id).  If 'threadp' is nonnull, stores the
 * new thread's handle there.  Aborts the process on failure.
 *
 * Side effects: marks the process multithreaded (forbidding future fork())
 * and takes this thread out of its RCU-quiescent state. */
void
xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
                void *(*start)(void *), void *arg)
{
    struct ovsthread_aux *aux;
    pthread_t thread;
    int error;

    forbid_forking("multiple threads exist");
    multithreaded = true;
    ovsrcu_quiesce_end();

    /* Ownership of 'aux' passes to ovsthread_wrapper(), which frees it. */
    aux = xmalloc(sizeof *aux);
    aux->start = start;
    aux->arg = arg;

    error = pthread_create(threadp ? threadp : &thread, attr,
                           ovsthread_wrapper, aux);
    if (error) {
        ovs_abort(error, "pthread_create failed");
    }
}
1514b275 BP |
299 | \f |
/* Slow path for one-time initialization: takes 'once->mutex' and re-checks
 * whether the initialization has already run.
 *
 * Returns false with 'once->mutex' STILL HELD if initialization has not yet
 * happened; the caller is then expected to perform it and release the mutex
 * through ovsthread_once_done().  Returns true, with the mutex released, if
 * initialization is already complete.
 *
 * NOTE(review): the public ovsthread_once_start() wrapper appears to invert
 * this return value -- confirm against ovs-thread.h. */
bool
ovsthread_once_start__(struct ovsthread_once *once)
{
    ovs_mutex_lock(&once->mutex);
    if (!ovsthread_once_is_done__(once)) {
        return false;
    }
    ovs_mutex_unlock(&once->mutex);
    return true;
}
310 | ||
97be1538 | 311 | void |
1514b275 BP |
312 | ovsthread_once_done(struct ovsthread_once *once) |
313 | { | |
314 | atomic_store(&once->done, true); | |
97be1538 | 315 | ovs_mutex_unlock(&once->mutex); |
1514b275 | 316 | } |
728a8b14 | 317 | \f |
0f2ea848 BP |
318 | bool |
319 | single_threaded(void) | |
320 | { | |
321 | return !multithreaded; | |
322 | } | |
323 | ||
728a8b14 | 324 | /* Asserts that the process has not yet created any threads (beyond the initial |
5453ae20 BP |
325 | * thread). |
326 | * | |
327 | * ('where' is used in logging. Commonly one would use | |
328 | * assert_single_threaded() to automatically provide the caller's source file | |
329 | * and line number for 'where'.) */ | |
728a8b14 | 330 | void |
5453ae20 | 331 | assert_single_threaded_at(const char *where) |
728a8b14 BP |
332 | { |
333 | if (multithreaded) { | |
334 | VLOG_FATAL("%s: attempted operation not allowed when multithreaded", | |
335 | where); | |
336 | } | |
337 | } | |
338 | ||
#ifndef _WIN32
/* Forks the current process (checking that this is allowed).  Aborts with
 * VLOG_FATAL if fork() returns an error, and otherwise returns the value
 * returned by fork() (0 in the child, the child's pid in the parent).
 *
 * ('where' is used in logging.  Commonly one would use xfork() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
pid_t
xfork_at(const char *where)
{
    pid_t pid;

    /* Refuse to fork once forbid_forking() has been called, e.g. after
     * threads exist. */
    if (must_not_fork) {
        VLOG_FATAL("%s: attempted to fork but forking not allowed (%s)",
                   where, must_not_fork);
    }

    pid = fork();
    if (pid < 0) {
        VLOG_FATAL("%s: fork failed (%s)", where, ovs_strerror(errno));
    }
    return pid;
}
#endif
728a8b14 BP |
364 | |
365 | /* Notes that the process must not call fork() from now on, for the specified | |
366 | * 'reason'. (The process may still fork() if it execs itself immediately | |
367 | * afterward.) */ | |
368 | void | |
369 | forbid_forking(const char *reason) | |
370 | { | |
371 | ovs_assert(reason != NULL); | |
372 | must_not_fork = reason; | |
373 | } | |
374 | ||
375 | /* Returns true if the process is allowed to fork, false otherwise. */ | |
376 | bool | |
377 | may_fork(void) | |
378 | { | |
379 | return !must_not_fork; | |
380 | } | |
0122f6e6 | 381 | \f |
51852a57 | 382 | /* ovsthread_stats. */ |
ed27e010 | 383 | |
51852a57 BP |
384 | void |
385 | ovsthread_stats_init(struct ovsthread_stats *stats) | |
ed27e010 | 386 | { |
ed27e010 BP |
387 | int i; |
388 | ||
51852a57 BP |
389 | ovs_mutex_init(&stats->mutex); |
390 | for (i = 0; i < ARRAY_SIZE(stats->buckets); i++) { | |
391 | stats->buckets[i] = NULL; | |
ed27e010 | 392 | } |
ed27e010 BP |
393 | } |
394 | ||
/* Destroys 'stats''s mutex.  Does not free the buckets themselves;
 * NOTE(review): callers appear responsible for freeing bucket memory before
 * calling this -- confirm with callers. */
void
ovsthread_stats_destroy(struct ovsthread_stats *stats)
{
    ovs_mutex_destroy(&stats->mutex);
}
400 | ||
51852a57 BP |
/* Returns the calling thread's bucket in 'stats', creating it with
 * 'new_bucket()' on first use.  The thread id is masked down to the bucket
 * count (which must be a power of 2 for the mask to work).  The first,
 * lock-free read is re-checked under 'stats->mutex' so that only one thread
 * creates each bucket; a bucket pointer is never changed once set. */
void *
ovsthread_stats_bucket_get(struct ovsthread_stats *stats,
                           void *(*new_bucket)(void))
{
    unsigned int idx = ovsthread_id_self() & (ARRAY_SIZE(stats->buckets) - 1);
    void *bucket = stats->buckets[idx];
    if (!bucket) {
        ovs_mutex_lock(&stats->mutex);
        bucket = stats->buckets[idx];   /* Re-check under the lock. */
        if (!bucket) {
            bucket = stats->buckets[idx] = new_bucket();
        }
        ovs_mutex_unlock(&stats->mutex);
    }
    return bucket;
}
417 | ||
51852a57 BP |
418 | size_t |
419 | ovs_thread_stats_next_bucket(const struct ovsthread_stats *stats, size_t i) | |
ed27e010 | 420 | { |
51852a57 BP |
421 | for (; i < ARRAY_SIZE(stats->buckets); i++) { |
422 | if (stats->buckets[i]) { | |
423 | break; | |
424 | } | |
ed27e010 | 425 | } |
51852a57 | 426 | return i; |
ed27e010 | 427 | } |
51852a57 | 428 | |
ed27e010 | 429 | \f |
deaa2985 JS |
/* Parses /proc/cpuinfo for the total number of physical cores on this system
 * across all CPU packages, not counting hyper-threads.
 *
 * Sets *n_cores to the total number of cores on this system, or 0 if the
 * number cannot be determined.
 *
 * Fix: the package-seen bitmap 'cpu' is a uint64_t supporting package ids up
 * to 63, but the shifts were written as "1 << id" -- an int shift, which is
 * undefined behavior for id >= 31.  Now uses UINT64_C(1).  Also, 'count' was
 * an int scanned with "%u"; it is now unsigned to match the conversion. */
static void
parse_cpuinfo(long int *n_cores)
{
    static const char file_name[] = "/proc/cpuinfo";
    char line[128];
    uint64_t cpu = 0; /* Support up to 64 CPU packages on a single system. */
    long int cores = 0;
    FILE *stream;

    stream = fopen(file_name, "r");
    if (!stream) {
        VLOG_DBG("%s: open failed (%s)", file_name, ovs_strerror(errno));
        return;
    }

    while (fgets(line, sizeof line, stream)) {
        unsigned int id;

        /* Find the next CPU package. */
        if (ovs_scan(line, "physical id%*[^:]: %u", &id)) {
            if (id > 63) {
                VLOG_WARN("Counted over 64 CPU packages on this system. "
                          "Parsing %s for core count may be inaccurate.",
                          file_name);
                cores = 0;
                break;
            }

            if (cpu & (UINT64_C(1) << id)) {
                /* We've already counted this package's cores. */
                continue;
            }
            cpu |= UINT64_C(1) << id;

            /* Find the number of cores for this package. */
            while (fgets(line, sizeof line, stream)) {
                unsigned int count;

                if (ovs_scan(line, "cpu cores%*[^:]: %u", &count)) {
                    cores += count;
                    break;
                }
            }
        }
    }
    fclose(stream);

    *n_cores = cores;
}
484 | ||
0122f6e6 | 485 | /* Returns the total number of cores on this system, or 0 if the number cannot |
deaa2985 JS |
486 | * be determined. |
487 | * | |
488 | * Tries not to count hyper-threads, but may be inaccurate - particularly on | |
489 | * platforms that do not provide /proc/cpuinfo, but also if /proc/cpuinfo is | |
490 | * formatted different to the layout that parse_cpuinfo() expects. */ | |
4974b2b8 | 491 | int |
0122f6e6 JS |
492 | count_cpu_cores(void) |
493 | { | |
deaa2985 JS |
494 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
495 | static long int n_cores; | |
496 | ||
497 | if (ovsthread_once_start(&once)) { | |
fdd73c23 | 498 | #ifndef _WIN32 |
deaa2985 JS |
499 | parse_cpuinfo(&n_cores); |
500 | if (!n_cores) { | |
501 | n_cores = sysconf(_SC_NPROCESSORS_ONLN); | |
502 | } | |
fdd73c23 GS |
503 | #else |
504 | SYSTEM_INFO sysinfo; | |
505 | GetSystemInfo(&sysinfo); | |
506 | n_cores = sysinfo.dwNumberOfProcessors; | |
507 | #endif | |
deaa2985 JS |
508 | ovsthread_once_done(&once); |
509 | } | |
0122f6e6 JS |
510 | |
511 | return n_cores > 0 ? n_cores : 0; | |
512 | } | |
e9020da2 BP |
513 | \f |
514 | /* ovsthread_key. */ | |
515 | ||
/* Per-thread values are stored in a two-level array: an L1 array of pointers
 * to lazily allocated L2 pages of L2_SIZE values each. */
#define L1_SIZE 1024
#define L2_SIZE 1024
#define MAX_KEYS (L1_SIZE * L2_SIZE)

/* A piece of thread-specific data. */
struct ovsthread_key {
    struct list list_node;      /* In 'inuse_keys' or 'free_keys'. */
    void (*destructor)(void *); /* Called at thread exit. */

    /* Indexes into the per-thread array in struct ovsthread_key_slots.
     * This key's data is stored in p1[index / L2_SIZE][index % L2_SIZE]. */
    unsigned int index;
};

/* Per-thread data structure. */
struct ovsthread_key_slots {
    struct list list_node;      /* In 'slots_list'. */
    void **p1[L1_SIZE];         /* Lazily allocated L2 pages of values. */
};

/* Contains "struct ovsthread_key_slots *". */
static pthread_key_t tsd_key;

/* Guards data structures below. */
static struct ovs_mutex key_mutex = OVS_MUTEX_INITIALIZER;

/* 'inuse_keys' holds "struct ovsthread_key"s that have been created and not
 * yet destroyed.
 *
 * 'free_keys' holds "struct ovsthread_key"s that have been deleted and are
 * ready for reuse.  (We keep them around only to be able to easily locate
 * free indexes.)
 *
 * Together, 'inuse_keys' and 'free_keys' hold an ovsthread_key for every index
 * from 0 to n_keys - 1, inclusive. */
static struct list inuse_keys OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&inuse_keys);
static struct list free_keys OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&free_keys);
static unsigned int n_keys OVS_GUARDED_BY(key_mutex);

/* All existing struct ovsthread_key_slots. */
static struct list slots_list OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&slots_list);
560 | ||
561 | static void * | |
562 | clear_slot(struct ovsthread_key_slots *slots, unsigned int index) | |
563 | { | |
564 | void **p2 = slots->p1[index / L2_SIZE]; | |
565 | if (p2) { | |
566 | void **valuep = &p2[index % L2_SIZE]; | |
567 | void *value = *valuep; | |
568 | *valuep = NULL; | |
569 | return value; | |
570 | } else { | |
571 | return NULL; | |
572 | } | |
573 | } | |
574 | ||
575 | static void | |
576 | ovsthread_key_destruct__(void *slots_) | |
577 | { | |
578 | struct ovsthread_key_slots *slots = slots_; | |
579 | struct ovsthread_key *key; | |
580 | unsigned int n; | |
581 | int i; | |
582 | ||
583 | ovs_mutex_lock(&key_mutex); | |
584 | list_remove(&slots->list_node); | |
585 | LIST_FOR_EACH (key, list_node, &inuse_keys) { | |
586 | void *value = clear_slot(slots, key->index); | |
587 | if (value && key->destructor) { | |
588 | key->destructor(value); | |
589 | } | |
590 | } | |
591 | n = n_keys; | |
592 | ovs_mutex_unlock(&key_mutex); | |
593 | ||
594 | for (i = 0; i < n / L2_SIZE; i++) { | |
595 | free(slots->p1[i]); | |
596 | } | |
597 | free(slots); | |
598 | } | |
599 | ||
/* Initializes '*keyp' as a thread-specific data key.  The data items are
 * initially null in all threads.
 *
 * If a thread exits with non-null data, then 'destructor', if nonnull, will be
 * called passing the final data value as its argument.  'destructor' must not
 * call any thread-specific data functions in this API.
 *
 * This function is similar to xpthread_key_create(). */
void
ovsthread_key_create(ovsthread_key_t *keyp, void (*destructor)(void *))
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct ovsthread_key *key;

    /* First call ever: register the real pthread key whose destructor tears
     * down a thread's whole slots structure. */
    if (ovsthread_once_start(&once)) {
        xpthread_key_create(&tsd_key, ovsthread_key_destruct__);
        ovsthread_once_done(&once);
    }

    ovs_mutex_lock(&key_mutex);
    if (list_is_empty(&free_keys)) {
        /* No deleted key to reuse; allocate a brand-new index.  More than
         * MAX_KEYS simultaneous keys is unsupported. */
        key = xmalloc(sizeof *key);
        key->index = n_keys++;
        if (key->index >= MAX_KEYS) {
            abort();
        }
    } else {
        /* Reuse the index of a previously deleted key. */
        key = CONTAINER_OF(list_pop_back(&free_keys),
                           struct ovsthread_key, list_node);
    }
    list_push_back(&inuse_keys, &key->list_node);
    key->destructor = destructor;
    ovs_mutex_unlock(&key_mutex);

    *keyp = key;
}
636 | ||
/* Frees 'key'.  The destructor supplied to ovsthread_key_create(), if any, is
 * not called, so any values still stored under 'key' are simply dropped
 * (their cleanup, if needed, is the caller's responsibility).
 *
 * This function is similar to xpthread_key_delete(). */
void
ovsthread_key_delete(ovsthread_key_t key)
{
    struct ovsthread_key_slots *slots;

    ovs_mutex_lock(&key_mutex);

    /* Move 'key' from 'inuse_keys' to 'free_keys', keeping its index
     * available for reuse by a future ovsthread_key_create(). */
    list_remove(&key->list_node);
    list_push_back(&free_keys, &key->list_node);

    /* Clear this slot in all threads, so a thread that reuses the index
     * later starts from NULL. */
    LIST_FOR_EACH (slots, list_node, &slots_list) {
        clear_slot(slots, key->index);
    }

    ovs_mutex_unlock(&key_mutex);
}
659 | ||
/* Returns the address of the calling thread's slot for 'key', allocating the
 * per-thread slots structure and the relevant L2 page on first use.
 * Registering a new slots structure in 'slots_list' happens under
 * 'key_mutex'; the L2 pages themselves are only ever touched by the owning
 * thread (or under 'key_mutex', in ovsthread_key_delete()). */
static void **
ovsthread_key_lookup__(const struct ovsthread_key *key)
{
    struct ovsthread_key_slots *slots;
    void **p2;

    slots = pthread_getspecific(tsd_key);
    if (!slots) {
        slots = xzalloc(sizeof *slots);

        ovs_mutex_lock(&key_mutex);
        pthread_setspecific(tsd_key, slots);
        list_push_back(&slots_list, &slots->list_node);
        ovs_mutex_unlock(&key_mutex);
    }

    p2 = slots->p1[key->index / L2_SIZE];
    if (!p2) {
        /* Lazily allocate this key's L2 page, zeroed so all slots read as
         * NULL initially. */
        p2 = xzalloc(L2_SIZE * sizeof *p2);
        slots->p1[key->index / L2_SIZE] = p2;
    }

    return &p2[key->index % L2_SIZE];
}
684 | ||
/* Sets the value of thread-specific data item 'key', in the current thread, to
 * 'value'.  Allocates this thread's storage for 'key' on first use.
 *
 * This function is similar to pthread_setspecific(). */
void
ovsthread_setspecific(ovsthread_key_t key, const void *value)
{
    *ovsthread_key_lookup__(key) = CONST_CAST(void *, value);
}
694 | ||
/* Returns the value of thread-specific data item 'key' in the current thread,
 * or NULL if it has never been set in this thread.
 *
 * This function is similar to pthread_getspecific(). */
void *
ovsthread_getspecific(ovsthread_key_t key)
{
    return *ovsthread_key_lookup__(key);
}
ec68790f | 703 | #endif |