/*
 * This is a reimplementation of a subset of the pthread_getspecific/setspecific
 * interface.  This appears to outperform the standard linuxthreads one
 * by a significant margin.
 * The major restriction is that each thread may only make a single
 * pthread_setspecific call on a single key.  (The current data structure
 * doesn't really require that.  The restriction should be easily removable.)
 * We don't currently support the destruction functions, though that
 * could be done.
 * We also currently assume that only one pthread_setspecific call
 * can be executed at a time, though that assumption would be easy to remove
 * by adding a lock.
 */
14 | ||
15 | #include <errno.h> | |
16 | #include "atomic_ops.h" | |
17 | ||
/* Called during key creation or setspecific. */
/* For the GC we already hold lock. */
/* Currently allocated objects leak on thread exit. */
/* That's hard to fix, but OK if we allocate garbage */
/* collected memory. */
/* Allocate n bytes of GC-visible memory (presumably zero-filled,   */
/* as the name suggests -- confirm against GC_INTERNAL_MALLOC).     */
/* The argument is parenthesized so that a compound expression      */
/* such as MALLOC_CLEAR(a + b) expands with the intended grouping.  */
#define MALLOC_CLEAR(n) GC_INTERNAL_MALLOC((n), NORMAL)
/* Prefix the public names of this module with GC_ so they do not   */
/* collide with client code.                                        */
#define PREFIXED(name) GC_##name
25 | ||
/* Number of slots in the per-key quick-id cache.  Must remain a    */
/* power of two so that "& (TS_CACHE_SIZE - 1)" works as a mask.    */
#define TS_CACHE_SIZE 1024
/* Map a quick thread id to a cache slot by folding the high byte   */
/* into the low bits and masking.  Every use of the argument is     */
/* parenthesized: the unparenthesized original mis-grouped compound */
/* arguments (the cast bound only to the argument's first operand). */
#define CACHE_HASH(n) (((((long)(n)) >> 8) ^ (long)(n)) & (TS_CACHE_SIZE - 1))
/* Number of buckets in the backup hash table (power of two, as     */
/* above).                                                          */
#define TS_HASH_SIZE 1024
/* Same folding hash, used for the pthread_t-indexed backup table.  */
#define HASH(n) (((((long)(n)) >> 8) ^ (long)(n)) & (TS_HASH_SIZE - 1))
30 | ||
/* An entry describing a thread-specific value for a given thread. */
/* All such accessible structures preserve the invariant that if either */
/* thread is a valid pthread id or qtid is a valid "quick thread id" */
/* for a thread, then value holds the corresponding thread specific */
/* value. This invariant must be preserved at ALL times, since */
/* asynchronous reads are allowed. */
typedef struct thread_specific_entry {
        volatile AO_t qtid;     /* quick thread id, only for cache;     */
                                /* read asynchronously without the lock */
        void * value;           /* the thread-specific datum itself     */
        struct thread_specific_entry *next;
                                /* chain within a hash bucket           */
        pthread_t thread;       /* owning thread; authoritative id used */
                                /* by the backup hash table             */
} tse;
43 | ||
44 | ||
/* We represent each thread-specific datum as two tables.  The first is */
/* a cache, indexed by a "quick thread identifier".  The "quick" thread */
/* identifier is an easy to compute value, which is guaranteed to       */
/* determine the thread, though a thread may correspond to more than    */
/* one value.  We typically use the address of a page in the stack.     */
/* The second is a hash table, indexed by pthread_self().  It is used   */
/* only as a backup.                                                    */
52 | ||
/* Return the "quick thread id".  Default version.  Assumes page size,  */
/* or at least thread stack separation, is at least 4K.                 */
/* Must be defined so that it never returns 0.  (Page 0 can't really    */
/* be part of any stack, since that would make 0 a valid stack pointer.)*/
/* The (void) prototype replaces the obsolescent empty parameter list.  */
static __inline__ unsigned long quick_thread_id(void) {
    int dummy;
    /* The address of a stack local identifies this thread's stack;     */
    /* shifting by 12 discards the within-page offset (4K pages).       */
    return (unsigned long)(&dummy) >> 12;
}
61 | ||
/* Sentinel quick thread id: quick_thread_id never returns 0, so 0  */
/* safely marks an unused cache entry.                              */
#define INVALID_QTID ((unsigned long)0)
/* NOTE(review): casting 0 to pthread_t assumes pthread_t is an     */
/* arithmetic or pointer type; POSIX does not guarantee that --     */
/* confirm for all target platforms.                                */
#define INVALID_THREADID ((pthread_t)0)
64 | ||
/* The per-key state: a lock-free read cache backed by a locked     */
/* hash table (see the two-table description above).                */
typedef struct thread_specific_data {
    tse * volatile cache[TS_CACHE_SIZE];
                        /* A faster index to the hash table */
    tse * hash[TS_HASH_SIZE];   /* buckets of tse chained via ->next,   */
                                /* keyed by pthread_self()              */
    pthread_mutex_t lock;       /* guards updates to hash and cache     */
} tsd;

/* A key is simply a pointer to its table pair; expands to GC_key_t. */
typedef tsd * PREFIXED(key_t);
73 | ||
/* Create a key: allocates and initializes a tsd and stores it in       */
/* *key_ptr.  NOTE(review): per the header comment, destructor support  */
/* is not implemented -- presumably the argument is ignored; confirm in */
/* the implementation file.  Returns 0 or an errno-style code.          */
extern int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *));

/* Bind value to key for the calling thread (at most one binding per    */
/* thread per key -- see header comment).  Returns 0 or an error code.  */
extern int PREFIXED(setspecific) (tsd * key, void * value);

/* Remove the calling thread's binding for key, e.g. on thread exit.    */
extern void PREFIXED(remove_specific) (tsd * key);

/* An internal version of getspecific that assumes a cache miss:        */
/* consults the hash table and refreshes *cache_entry.                  */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_entry);
83 | ||
/* Fast-path lookup (expands to GC_getspecific): probe the cache slot   */
/* for this thread's quick id; on a hit return the cached value, else   */
/* fall back to the hash-table lookup.  Runs without the key's lock,    */
/* relying on the tse invariant documented above.                       */
static __inline__ void * PREFIXED(getspecific) (tsd * key) {
    /* NOTE(review): qtid is signed long here although quick_thread_id  */
    /* returns unsigned long and tse.qtid is AO_t -- appears benign but */
    /* worth confirming on the target ABI.                              */
    long qtid = quick_thread_id();
    unsigned hash_val = CACHE_HASH(qtid);
    tse * volatile * entry_ptr = key -> cache + hash_val;
    tse * entry = *entry_ptr;   /* Must be loaded only once. */
    /* NOTE(review): entry is dereferenced unconditionally, so every    */
    /* cache slot must be initialized to point at some (possibly dummy  */
    /* "invalid") entry, never NULL -- confirm in the setup code.       */
    if (EXPECT(entry -> qtid == qtid, 1)) {
      /* Cache hit: by the invariant, a matching qtid guarantees value  */
      /* belongs to this thread.                                        */
      GC_ASSERT(entry -> thread == pthread_self());
      return entry -> value;
    }
    /* Cache miss: do the locked/hashed lookup and let it refresh the   */
    /* cache slot through entry_ptr.                                    */
    return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);
}
95 | ||
96 |