#define _GNU_SOURCE 1
#include <pthread.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include <unistd.h>
#include "include/liblockdep/mutex.h"
#include "../../include/linux/rbtree.h"
12 * struct lock_lookup - liblockdep's view of a single unique lock
13 * @orig: pointer to the original pthread lock, used for lookups
14 * @dep_map: lockdep's dep_map structure
15 * @key: lockdep's key structure
16 * @node: rb-tree node used to store the lock in a global tree
17 * @name: a unique name for the lock
20 void *orig
; /* Original pthread lock, used for lookups */
21 struct lockdep_map dep_map
; /* Since all locks are dynamic, we need
22 * a dep_map and a key for each lock */
24 * Wait, there's no support for key classes? Yup :(
25 * Most big projects wrap the pthread api with their own calls to
26 * be compatible with different locking methods. This means that
27 * "classes" will be brokes since the function that creates all
28 * locks will point to a generic locking function instead of the
29 * actual code that wants to do the locking.
31 struct lock_class_key key
;
33 #define LIBLOCKDEP_MAX_LOCK_NAME 22
34 char name
[LIBLOCKDEP_MAX_LOCK_NAME
];
37 /* This is where we store our locks */
38 static struct rb_root locks
= RB_ROOT
;
39 static pthread_rwlock_t locks_rwlock
= PTHREAD_RWLOCK_INITIALIZER
;
/* pthread mutex API */

#ifdef __GLIBC__
/*
 * glibc exposes internal __pthread_* aliases; chaining to them lets
 * locking work even before our constructor has run dlsym().
 */
extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
#else
/* Non-glibc: no internal aliases; resolved later via dlsym() in init_preload() */
#define __pthread_mutex_init	NULL
#define __pthread_mutex_lock	NULL
#define __pthread_mutex_trylock	NULL
#define __pthread_mutex_unlock	NULL
#define __pthread_mutex_destroy	NULL
#endif

/* Pointers to the real pthread mutex implementation we wrap */
static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr) = __pthread_mutex_init;
static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;
63 /* pthread rwlock API */
66 extern int __pthread_rwlock_init(pthread_rwlock_t
*rwlock
, const pthread_rwlockattr_t
*attr
);
67 extern int __pthread_rwlock_destroy(pthread_rwlock_t
*rwlock
);
68 extern int __pthread_rwlock_wrlock(pthread_rwlock_t
*rwlock
);
69 extern int __pthread_rwlock_trywrlock(pthread_rwlock_t
*rwlock
);
70 extern int __pthread_rwlock_rdlock(pthread_rwlock_t
*rwlock
);
71 extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t
*rwlock
);
72 extern int __pthread_rwlock_unlock(pthread_rwlock_t
*rwlock
);
74 #define __pthread_rwlock_init NULL
75 #define __pthread_rwlock_destroy NULL
76 #define __pthread_rwlock_wrlock NULL
77 #define __pthread_rwlock_trywrlock NULL
78 #define __pthread_rwlock_rdlock NULL
79 #define __pthread_rwlock_tryrdlock NULL
80 #define __pthread_rwlock_unlock NULL
83 static int (*ll_pthread_rwlock_init
)(pthread_rwlock_t
*rwlock
,
84 const pthread_rwlockattr_t
*attr
) = __pthread_rwlock_init
;
85 static int (*ll_pthread_rwlock_destroy
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_destroy
;
86 static int (*ll_pthread_rwlock_rdlock
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_rdlock
;
87 static int (*ll_pthread_rwlock_tryrdlock
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_tryrdlock
;
88 static int (*ll_pthread_rwlock_trywrlock
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_trywrlock
;
89 static int (*ll_pthread_rwlock_wrlock
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_wrlock
;
90 static int (*ll_pthread_rwlock_unlock
)(pthread_rwlock_t
*rwlock
) = __pthread_rwlock_unlock
;
/* Tracks how far constructor-time initialization has progressed */
enum { none, prepare, done, } __init_state;

static void init_preload(void);

/*
 * try_init_preload - run init_preload() if it hasn't completed yet.
 *
 * Called at the top of every wrapper so that locks touched from other
 * libraries' constructors (before our own constructor ran) still work.
 */
static void try_init_preload(void)
{
	if (__init_state != done)
		init_preload();
}
100 static struct rb_node
**__get_lock_node(void *lock
, struct rb_node
**parent
)
102 struct rb_node
**node
= &locks
.rb_node
;
103 struct lock_lookup
*l
;
108 l
= rb_entry(*node
, struct lock_lookup
, node
);
112 node
= &l
->node
.rb_left
;
113 else if (lock
> l
->orig
)
114 node
= &l
->node
.rb_right
;
122 #ifndef LIBLOCKDEP_STATIC_ENTRIES
123 #define LIBLOCKDEP_STATIC_ENTRIES 1024
126 static struct lock_lookup __locks
[LIBLOCKDEP_STATIC_ENTRIES
];
127 static int __locks_nr
;
129 static inline bool is_static_lock(struct lock_lookup
*lock
)
131 return lock
>= __locks
&& lock
< __locks
+ ARRAY_SIZE(__locks
);
134 static struct lock_lookup
*alloc_lock(void)
136 if (__init_state
!= done
) {
138 * Some programs attempt to initialize and use locks in their
139 * allocation path. This means that a call to malloc() would
140 * result in locks being initialized and locked.
142 * Why is it an issue for us? dlsym() below will try allocating
143 * to give us the original function. Since this allocation will
144 * result in a locking operations, we have to let pthread deal
145 * with it, but we can't! we don't have the pointer to the
146 * original API since we're inside dlsym() trying to get it
149 int idx
= __locks_nr
++;
150 if (idx
>= ARRAY_SIZE(__locks
)) {
151 dprintf(STDERR_FILENO
,
152 "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
153 exit(EX_UNAVAILABLE
);
155 return __locks
+ idx
;
158 return malloc(sizeof(struct lock_lookup
));
/* Release a lock_lookup entry; static-pool slots are never freed */
static inline void free_lock(struct lock_lookup *lock)
{
	if (likely(!is_static_lock(lock)))
		free(lock);
}
168 * __get_lock - find or create a lock instance
169 * @lock: pointer to a pthread lock function
171 * Try to find an existing lock in the rbtree using the provided pointer. If
172 * one wasn't found - create it.
174 static struct lock_lookup
*__get_lock(void *lock
)
176 struct rb_node
**node
, *parent
;
177 struct lock_lookup
*l
;
179 ll_pthread_rwlock_rdlock(&locks_rwlock
);
180 node
= __get_lock_node(lock
, &parent
);
181 ll_pthread_rwlock_unlock(&locks_rwlock
);
183 return rb_entry(*node
, struct lock_lookup
, node
);
186 /* We didn't find the lock, let's create it */
193 * Currently the name of the lock is the ptr value of the pthread lock,
194 * while not optimal, it makes debugging a bit easier.
196 * TODO: Get the real name of the lock using libdwarf
198 sprintf(l
->name
, "%p", lock
);
199 lockdep_init_map(&l
->dep_map
, l
->name
, &l
->key
, 0);
201 ll_pthread_rwlock_wrlock(&locks_rwlock
);
202 /* This might have changed since the last time we fetched it */
203 node
= __get_lock_node(lock
, &parent
);
204 rb_link_node(&l
->node
, parent
, node
);
205 rb_insert_color(&l
->node
, &locks
);
206 ll_pthread_rwlock_unlock(&locks_rwlock
);
211 static void __del_lock(struct lock_lookup
*lock
)
213 ll_pthread_rwlock_wrlock(&locks_rwlock
);
214 rb_erase(&lock
->node
, &locks
);
215 ll_pthread_rwlock_unlock(&locks_rwlock
);
int pthread_mutex_init(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr)
{
	int r;

	/*
	 * We keep trying to init our preload module because there might be
	 * code in init sections that tries to touch locks before we are
	 * initialized, in that case we'll need to manually call preload
	 * to get us going.
	 *
	 * Funny enough, kernel's lockdep had the same issue, and used
	 * (almost) the same solution. See look_up_lock_class() in
	 * kernel/locking/lockdep.c for details.
	 */
	try_init_preload();

	r = ll_pthread_mutex_init(mutex, attr);
	if (r == 0)
		/*
		 * We do a dummy initialization here so that lockdep could
		 * warn us if something fishy is going on - such as
		 * initializing a held lock.
		 */
		__get_lock(mutex);

	return r;
}
248 int pthread_mutex_lock(pthread_mutex_t
*mutex
)
254 lock_acquire(&__get_lock(mutex
)->dep_map
, 0, 0, 0, 1, NULL
,
255 (unsigned long)_RET_IP_
);
257 * Here's the thing with pthread mutexes: unlike the kernel variant,
260 * This means that the behaviour here is a bit different from what's
261 * going on in the kernel: there we just tell lockdep that we took the
262 * lock before actually taking it, but here we must deal with the case
263 * that locking failed.
265 * To do that we'll "release" the lock if locking failed - this way
266 * we'll get lockdep doing the correct checks when we try to take
267 * the lock, and if that fails - we'll be back to the correct
268 * state by releasing it.
270 r
= ll_pthread_mutex_lock(mutex
);
272 lock_release(&__get_lock(mutex
)->dep_map
, 0, (unsigned long)_RET_IP_
);
277 int pthread_mutex_trylock(pthread_mutex_t
*mutex
)
283 lock_acquire(&__get_lock(mutex
)->dep_map
, 0, 1, 0, 1, NULL
, (unsigned long)_RET_IP_
);
284 r
= ll_pthread_mutex_trylock(mutex
);
286 lock_release(&__get_lock(mutex
)->dep_map
, 0, (unsigned long)_RET_IP_
);
291 int pthread_mutex_unlock(pthread_mutex_t
*mutex
)
297 lock_release(&__get_lock(mutex
)->dep_map
, 0, (unsigned long)_RET_IP_
);
299 * Just like taking a lock, only in reverse!
301 * If we fail releasing the lock, tell lockdep we're holding it again.
303 r
= ll_pthread_mutex_unlock(mutex
);
305 lock_acquire(&__get_lock(mutex
)->dep_map
, 0, 0, 0, 1, NULL
, (unsigned long)_RET_IP_
);
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	try_init_preload();

	/*
	 * Let's see if we're releasing a lock that's held.
	 *
	 * TODO: Hook into free() and add that check there as well.
	 */
	debug_check_no_locks_freed(mutex, sizeof(*mutex));
	__del_lock(__get_lock(mutex));
	return ll_pthread_mutex_destroy(mutex);
}
324 /* This is the rwlock part, very similar to what happened with mutex above */
325 int pthread_rwlock_init(pthread_rwlock_t
*rwlock
,
326 const pthread_rwlockattr_t
*attr
)
332 r
= ll_pthread_rwlock_init(rwlock
, attr
);
339 int pthread_rwlock_destroy(pthread_rwlock_t
*rwlock
)
343 debug_check_no_locks_freed(rwlock
, sizeof(*rwlock
));
344 __del_lock(__get_lock(rwlock
));
345 return ll_pthread_rwlock_destroy(rwlock
);
348 int pthread_rwlock_rdlock(pthread_rwlock_t
*rwlock
)
354 lock_acquire(&__get_lock(rwlock
)->dep_map
, 0, 0, 2, 1, NULL
, (unsigned long)_RET_IP_
);
355 r
= ll_pthread_rwlock_rdlock(rwlock
);
357 lock_release(&__get_lock(rwlock
)->dep_map
, 0, (unsigned long)_RET_IP_
);
362 int pthread_rwlock_tryrdlock(pthread_rwlock_t
*rwlock
)
368 lock_acquire(&__get_lock(rwlock
)->dep_map
, 0, 1, 2, 1, NULL
, (unsigned long)_RET_IP_
);
369 r
= ll_pthread_rwlock_tryrdlock(rwlock
);
371 lock_release(&__get_lock(rwlock
)->dep_map
, 0, (unsigned long)_RET_IP_
);
376 int pthread_rwlock_trywrlock(pthread_rwlock_t
*rwlock
)
382 lock_acquire(&__get_lock(rwlock
)->dep_map
, 0, 1, 0, 1, NULL
, (unsigned long)_RET_IP_
);
383 r
= ll_pthread_rwlock_trywrlock(rwlock
);
385 lock_release(&__get_lock(rwlock
)->dep_map
, 0, (unsigned long)_RET_IP_
);
390 int pthread_rwlock_wrlock(pthread_rwlock_t
*rwlock
)
396 lock_acquire(&__get_lock(rwlock
)->dep_map
, 0, 0, 0, 1, NULL
, (unsigned long)_RET_IP_
);
397 r
= ll_pthread_rwlock_wrlock(rwlock
);
399 lock_release(&__get_lock(rwlock
)->dep_map
, 0, (unsigned long)_RET_IP_
);
404 int pthread_rwlock_unlock(pthread_rwlock_t
*rwlock
)
410 lock_release(&__get_lock(rwlock
)->dep_map
, 0, (unsigned long)_RET_IP_
);
411 r
= ll_pthread_rwlock_unlock(rwlock
);
413 lock_acquire(&__get_lock(rwlock
)->dep_map
, 0, 0, 0, 1, NULL
, (unsigned long)_RET_IP_
);
418 __attribute__((constructor
)) static void init_preload(void)
420 if (__init_state
== done
)
424 __init_state
= prepare
;
426 ll_pthread_mutex_init
= dlsym(RTLD_NEXT
, "pthread_mutex_init");
427 ll_pthread_mutex_lock
= dlsym(RTLD_NEXT
, "pthread_mutex_lock");
428 ll_pthread_mutex_trylock
= dlsym(RTLD_NEXT
, "pthread_mutex_trylock");
429 ll_pthread_mutex_unlock
= dlsym(RTLD_NEXT
, "pthread_mutex_unlock");
430 ll_pthread_mutex_destroy
= dlsym(RTLD_NEXT
, "pthread_mutex_destroy");
432 ll_pthread_rwlock_init
= dlsym(RTLD_NEXT
, "pthread_rwlock_init");
433 ll_pthread_rwlock_destroy
= dlsym(RTLD_NEXT
, "pthread_rwlock_destroy");
434 ll_pthread_rwlock_rdlock
= dlsym(RTLD_NEXT
, "pthread_rwlock_rdlock");
435 ll_pthread_rwlock_tryrdlock
= dlsym(RTLD_NEXT
, "pthread_rwlock_tryrdlock");
436 ll_pthread_rwlock_wrlock
= dlsym(RTLD_NEXT
, "pthread_rwlock_wrlock");
437 ll_pthread_rwlock_trywrlock
= dlsym(RTLD_NEXT
, "pthread_rwlock_trywrlock");
438 ll_pthread_rwlock_unlock
= dlsym(RTLD_NEXT
, "pthread_rwlock_unlock");