/* tools/lib/lockdep/preload.c */

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include <unistd.h>
#include "include/liblockdep/mutex.h"
#include "../../include/linux/rbtree.h"
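
/*
 * liblockdep works by being loaded into an unmodified program via
 * LD_PRELOAD, so that the pthread_mutex_*() and pthread_rwlock_*()
 * definitions below shadow the libpthread ones, routing every lock
 * operation through the kernel's lockdep engine before forwarding it
 * to the real implementation. A typical invocation (assuming the
 * library was built as liblockdep.so in this directory) would be:
 *
 *	LD_PRELOAD=./liblockdep.so ./my_program
 */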

/**
 * struct lock_lookup - liblockdep's view of a single unique lock
 * @orig: pointer to the original pthread lock, used for lookups
 * @dep_map: lockdep's dep_map structure
 * @key: lockdep's key structure
 * @node: rb-tree node used to store the lock in a global tree
 * @name: a unique name for the lock
 */
struct lock_lookup {
	void *orig; /* Original pthread lock, used for lookups */
	struct lockdep_map dep_map; /* Since all locks are dynamic, we need
				     * a dep_map and a key for each lock */
	/*
	 * Wait, there's no support for key classes? Yup :(
	 * Most big projects wrap the pthread API with their own calls to
	 * be compatible with different locking methods. This means that
	 * "classes" will be broken, since the function that creates all
	 * locks will point to a generic locking function instead of the
	 * actual code that wants to do the locking.
	 */
	struct lock_class_key key;
	struct rb_node node;
#define LIBLOCKDEP_MAX_LOCK_NAME 22
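	/* Enough for a "%p"-formatted 64-bit pointer: "0x" + 16 hex digits + NUL */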
	char name[LIBLOCKDEP_MAX_LOCK_NAME];
};

/* This is where we store our locks */
static struct rb_root locks = RB_ROOT;
static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* pthread mutex API */

#ifdef __GLIBC__
extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
#else
#define __pthread_mutex_init NULL
#define __pthread_mutex_lock NULL
#define __pthread_mutex_trylock NULL
#define __pthread_mutex_unlock NULL
#define __pthread_mutex_destroy NULL
#endif
static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr) = __pthread_mutex_init;
static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;

/* pthread rwlock API */

#ifdef __GLIBC__
extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
#else
#define __pthread_rwlock_init NULL
#define __pthread_rwlock_destroy NULL
#define __pthread_rwlock_wrlock NULL
#define __pthread_rwlock_trywrlock NULL
#define __pthread_rwlock_rdlock NULL
#define __pthread_rwlock_tryrdlock NULL
#define __pthread_rwlock_unlock NULL
#endif

static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;
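
/*
 * Initialization goes through three states: 'none' (the constructor hasn't
 * run yet), 'prepare' (we're inside dlsym() resolving the real pthread
 * entry points, so any allocation it triggers must be served from the
 * static pool in alloc_lock() below), and 'done'.
 */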
enum { none, prepare, done, } __init_state;
static void init_preload(void);
static void try_init_preload(void)
{
	if (__init_state != done)
		init_preload();
}

static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
{
	struct rb_node **node = &locks.rb_node;
	struct lock_lookup *l;

	*parent = NULL;

	while (*node) {
		l = rb_entry(*node, struct lock_lookup, node);

		*parent = *node;
		if (lock < l->orig)
			node = &l->node.rb_left;
		else if (lock > l->orig)
			node = &l->node.rb_right;
		else
			return node;
	}

	return node;
}
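
/*
 * Like the kernel's rb-tree search/insert idiom, __get_lock_node() returns
 * a pointer to the link slot itself: if *ret is non-NULL the lock already
 * has a node in the tree, otherwise the slot (together with *parent) is
 * exactly what rb_link_node() needs for an insertion. __get_lock() below
 * uses it for both halves:
 *
 *	node = __get_lock_node(lock, &parent);
 *	if (*node)				// found an existing entry
 *		return rb_entry(*node, struct lock_lookup, node);
 *	...
 *	rb_link_node(&l->node, parent, node);	// insert at the empty slot
 *	rb_insert_color(&l->node, &locks);
 */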

#ifndef LIBLOCKDEP_STATIC_ENTRIES
#define LIBLOCKDEP_STATIC_ENTRIES 1024
#endif

static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
static int __locks_nr;

static inline bool is_static_lock(struct lock_lookup *lock)
{
	return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
}

static struct lock_lookup *alloc_lock(void)
{
	if (__init_state != done) {
		/*
		 * Some programs attempt to initialize and use locks in their
		 * allocation path. This means that a call to malloc() would
		 * result in locks being initialized and locked.
		 *
		 * Why is that an issue for us? dlsym() below will try
		 * allocating to give us the original function. Since this
		 * allocation will result in locking operations, we have to
		 * let pthread deal with it, but we can't! We don't have a
		 * pointer to the original API since we're inside dlsym()
		 * trying to get it.
		 */

		int idx = __locks_nr++;
		if (idx >= ARRAY_SIZE(__locks)) {
			dprintf(STDERR_FILENO,
				"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
			exit(EX_UNAVAILABLE);
		}
		return __locks + idx;
	}

	return malloc(sizeof(struct lock_lookup));
}

static inline void free_lock(struct lock_lookup *lock)
{
	if (likely(!is_static_lock(lock)))
		free(lock);
}

/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to the original pthread lock
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one wasn't found, create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
	struct rb_node **node, *parent;
	struct lock_lookup *l;

	ll_pthread_rwlock_rdlock(&locks_rwlock);
	node = __get_lock_node(lock, &parent);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	if (*node)
		return rb_entry(*node, struct lock_lookup, node);

	/* We didn't find the lock, let's create it */
	l = alloc_lock();
	if (l == NULL)
		return NULL;

	l->orig = lock;
	/*
	 * Currently the name of the lock is the ptr value of the pthread
	 * lock; while not optimal, it makes debugging a bit easier.
	 *
	 * TODO: Get the real name of the lock using libdwarf
	 */
	sprintf(l->name, "%p", lock);
	lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

	ll_pthread_rwlock_wrlock(&locks_rwlock);
	/* This might have changed since the last time we fetched it */
	node = __get_lock_node(lock, &parent);
	rb_link_node(&l->node, parent, node);
	rb_insert_color(&l->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);

	return l;
}

static void __del_lock(struct lock_lookup *lock)
{
	ll_pthread_rwlock_wrlock(&locks_rwlock);
	rb_erase(&lock->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	free_lock(lock);
}

int pthread_mutex_init(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr)
{
	int r;

	/*
	 * We keep trying to init our preload module because there might be
	 * code in init sections that tries to touch locks before we are
	 * initialized; in that case we'll need to manually call preload
	 * to get us going.
	 *
	 * Funnily enough, kernel's lockdep had the same issue, and used
	 * (almost) the same solution. See look_up_lock_class() in
	 * kernel/locking/lockdep.c for details.
	 */
	try_init_preload();

	r = ll_pthread_mutex_init(mutex, attr);
	if (r == 0)
		/*
		 * We do a dummy initialization here so that lockdep could
		 * warn us if something fishy is going on - such as
		 * initializing a held lock.
		 */
		__get_lock(mutex);

	return r;
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();
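
	/*
	 * lock_acquire() takes the same arguments as the kernel's lockdep:
	 * (map, subclass, trylock, read, check, nest_lock, ip). Here
	 * trylock=0 and read=0 mark a blocking, exclusive acquisition,
	 * and check=1 requests full validation.
	 */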
	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
			(unsigned long)_RET_IP_);
	/*
	 * Here's the thing with pthread mutexes: unlike the kernel variant,
	 * they can fail.
	 *
	 * This means that the behaviour here is a bit different from what's
	 * going on in the kernel: there we just tell lockdep that we took the
	 * lock before actually taking it, but here we must deal with the case
	 * that locking failed.
	 *
	 * To do that we'll "release" the lock if locking failed - this way
	 * we'll get lockdep doing the correct checks when we try to take
	 * the lock, and if that fails - we'll be back to the correct
	 * state by releasing it.
	 */
	r = ll_pthread_mutex_lock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_mutex_trylock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
	/*
	 * Just like taking a lock, only in reverse!
	 *
	 * If we fail releasing the lock, tell lockdep we're holding it again.
	 */
	r = ll_pthread_mutex_unlock(mutex);
	if (r)
		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	try_init_preload();

	/*
	 * Let's see if we're destroying a lock that's still held.
	 *
	 * TODO: Hook into free() and add that check there as well.
	 */
	debug_check_no_locks_freed(mutex, sizeof(*mutex));
	__del_lock(__get_lock(mutex));
	return ll_pthread_mutex_destroy(mutex);
}
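
/*
 * To illustrate what the hooks above buy us, consider a hypothetical
 * program whose two threads take mutexes a and b in opposite orders
 * (the classic ABBA pattern). Run with this library preloaded, lockdep
 * would emit a "possible circular locking dependency" style report as
 * soon as both dependencies have been observed, even on runs that
 * happen not to deadlock:
 *
 *	static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void *t1(void *arg)
 *	{
 *		pthread_mutex_lock(&a);
 *		pthread_mutex_lock(&b);		// dependency: a -> b
 *		pthread_mutex_unlock(&b);
 *		pthread_mutex_unlock(&a);
 *		return NULL;
 *	}
 *
 *	void *t2(void *arg)
 *	{
 *		pthread_mutex_lock(&b);
 *		pthread_mutex_lock(&a);		// dependency: b -> a, cycle!
 *		pthread_mutex_unlock(&a);
 *		pthread_mutex_unlock(&b);
 *		return NULL;
 *	}
 */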

/* This is the rwlock part, very similar to what happens with mutexes above */
int pthread_rwlock_init(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr)
{
	int r;

	try_init_preload();

	r = ll_pthread_rwlock_init(rwlock, attr);
	if (r == 0)
		__get_lock(rwlock);

	return r;
}

int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	try_init_preload();

	debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
	__del_lock(__get_lock(rwlock));
	return ll_pthread_rwlock_destroy(rwlock);
}

int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	int r;

	try_init_preload();
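
	/* read=2 marks a recursive read acquisition; POSIX allows a thread
	 * to hold multiple read locks on the same rwlock. */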
	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_rdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_tryrdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_trywrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_wrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	int r;

	try_init_preload();

	lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_unlock(rwlock);
	if (r)
		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

__attribute__((constructor)) static void init_preload(void)
{
	if (__init_state == done)
		return;

#ifndef __GLIBC__
	__init_state = prepare;

	ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
	ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
	ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
	ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
	ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");

	ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
	ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
	ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
	ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
	ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
	ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif

	__init_state = done;
}