/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

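/*
 * Illustrative usage (a sketch, not part of this file; all names below
 * are hypothetical): a mutex is normally set up either statically with
 * DEFINE_MUTEX() or at runtime with mutex_init(), both of which end up
 * in __mutex_init() above:
 *
 *	static DEFINE_MUTEX(foo_global_lock);	// static initialization
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);		// runtime initialization
 *	}
 */
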
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with the
 * mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
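
/*
 * Illustrative usage (a sketch, not part of this file; names are
 * hypothetical): the canonical pattern is to take the mutex, touch the
 * shared data, then release it from the same task:
 *
 *	static DEFINE_MUTEX(foo_lock);
 *	static LIST_HEAD(foo_list);
 *
 *	void foo_add(struct foo *f)
 *	{
 *		mutex_lock(&foo_lock);	// may sleep: process context only
 *		list_add(&f->node, &foo_list);
 *		mutex_unlock(&foo_lock);
 *	}
 */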
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that lock->owner still matches owner. If that fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changes, which is a sign of heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner this
         * early: the slow path is always taken, and it clears the owner
         * field after verifying that the owner was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
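
/*
 * Illustrative note (a sketch, not part of this file; names are
 * hypothetical): because a mutex must be released by the task that
 * acquired it, and never from interrupt context, it cannot be used to
 * signal another context the way a semaphore or completion can:
 *
 *	// WRONG: taken in process context, released from an interrupt
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		mutex_unlock(&foo_lock);	// invalid: interrupt context
 *		return IRQ_HANDLED;
 *	}
 *
 * Use a completion or a semaphore for cross-context hand-offs.
 */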

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */

        for (;;) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have been preempted
                 * between the owner acquiring the lock and setting the
                 * owner field. If we're an RT task, that can live-lock,
                 * because we would never let the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (atomic_xchg(&lock->count, -1) == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (atomic_xchg(&lock->count, -1) == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

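/*
 * For reference, the lock->count protocol used by the fastpaths and the
 * slowpath above (a summary of this file's code, not new semantics):
 *
 *	 1  - unlocked
 *	 0  - locked, no waiters
 *	 <0 - locked, waiters may be pending; the unlock fastpath fails
 *	      and __mutex_unlock_slowpath() wakes up the first waiter
 */
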
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
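
/*
 * Illustrative usage (a sketch, not part of this file; all names are
 * hypothetical): taking two mutexes of the same lock class, e.g. when
 * moving data between two instances of the same object. Lockdep would
 * flag the second acquisition as recursive locking unless it is
 * annotated with a different subclass:
 *
 *	void foo_transfer(struct foo *src, struct foo *dst)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		// ... move data ...
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 *
 * (A real caller must also impose a stable locking order between the
 * two objects, e.g. by address, to avoid an actual ABBA deadlock.)
 */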

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
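
/*
 * Illustrative usage (a sketch, not part of this file; names are
 * hypothetical): callers must check the return value and back out on
 * failure instead of touching the protected data; -EINTR is commonly
 * converted to -ERESTARTSYS so the interrupted syscall can be restarted:
 *
 *	int foo_op(struct foo *f)
 *	{
 *		if (mutex_lock_interruptible(&f->lock))
 *			return -ERESTARTSYS;
 *		// ... critical section ...
 *		mutex_unlock(&f->lock);
 *		return 0;
 *	}
 *
 * mutex_lock_killable() below follows the same pattern, except that only
 * a fatal signal (one that will kill the task) interrupts the wait.
 */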

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
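
/*
 * Illustrative usage (a sketch, not part of this file; names are
 * hypothetical): note the spin_trylock()-style convention - nonzero
 * means the lock was taken, the opposite of down_trylock():
 *
 *	if (mutex_trylock(&foo_lock)) {
 *		// got it: do the optional work, then release
 *		foo_do_optional_work();
 *		mutex_unlock(&foo_lock);
 *	} else {
 *		// contended: skip or fall back; do NOT unlock here
 *	}
 */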

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
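
/*
 * Illustrative usage (a sketch, not part of this file; names are
 * hypothetical): dropping a reference where the final put must tear the
 * object down under a lock, without taking the mutex on every put:
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_mutex_lock(&f->refcount, &foo_registry_lock)) {
 *			// refcount hit 0: we hold foo_registry_lock here
 *			list_del(&f->node);
 *			mutex_unlock(&foo_registry_lock);
 *			kfree(f);
 *		}
 *	}
 */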