/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
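
/*
 * Usage sketch (illustrative only; the structure and names below are
 * hypothetical): a mutex is either defined statically with DEFINE_MUTEX()
 * or initialized at run time with mutex_init(), which wraps __mutex_init().
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_device {
 *		struct mutex	io_lock;
 *	};
 *
 *	void my_device_setup(struct my_device *dev)
 *	{
 *		mutex_init(&dev->io_lock);
 *	}
 */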

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
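
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * a typical caller pairs mutex_lock() and mutex_unlock() around the
 * critical section that touches the protected data.
 *
 *	static DEFINE_MUTEX(stats_lock);
 *	static unsigned long nr_events;
 *
 *	void record_event(void)
 *	{
 *		mutex_lock(&stats_lock);
 *		nr_events++;
 *		mutex_unlock(&stats_lock);
 *	}
 */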

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct thread_info *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		__schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
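
/*
 * Nesting sketch (illustrative only; the structure and helper below are
 * hypothetical): when two mutexes of the same lock class must legitimately
 * be held at once, the inner acquisition is annotated for lockdep with a
 * subclass such as SINGLE_DEPTH_NESTING.
 *
 *	struct node {
 *		struct mutex	lock;
 *	};
 *
 *	void lock_parent_and_child(struct node *parent, struct node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */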

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
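
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * callers of mutex_lock_interruptible() must check the return value and
 * back out when a signal interrupted the wait, instead of touching the
 * protected data.
 *
 *	static DEFINE_MUTEX(cfg_lock);
 *	static int cfg_value;
 *
 *	int update_config(int val)
 *	{
 *		if (mutex_lock_interruptible(&cfg_lock))
 *			return -ERESTARTSYS;
 *		cfg_value = val;
 *		mutex_unlock(&cfg_lock);
 *		return 0;
 *	}
 */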

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the inverse of the down_trylock() return
 * values! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_trylock);
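
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * since mutex_trylock() follows the spin_trylock() convention, a caller
 * checks for a nonzero return before entering the critical section and
 * simply skips the work on contention.
 *
 *	static DEFINE_MUTEX(cache_lock);
 *
 *	void try_flush_cache(void)
 *	{
 *		if (!mutex_trylock(&cache_lock))
 *			return;
 *		flush_cache_locked();
 *		mutex_unlock(&cache_lock);
 *	}
 */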