/*
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void fastcall __mutex_init(struct mutex *lock, const char *name)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);
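
/*
 * Illustrative usage sketch, not part of this file ('my_device' and
 * friends are hypothetical names): a mutex is either defined
 * statically via DEFINE_MUTEX() or initialized at runtime via
 * mutex_init(), but never while it is already locked:
 *
 *	static DEFINE_MUTEX(my_global_lock);
 *
 *	struct my_device {
 *		struct mutex lock;
 *		int counter;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		mutex_init(&dev->lock);
 *		dev->counter = 0;
 *	}
 */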
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void fastcall __sched mutex_lock(struct mutex *lock)
{
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 *
	 * NOTE: if asm/mutex.h is included, then some architectures
	 * rely on mutex_lock() having _no other code_ here but this
	 * fastpath. That allows the assembly fastpath to do
	 * tail-merging optimizations. (If you want to put testcode
	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 *
	 * NOTE: no other code must be here - see mutex_lock().
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
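
/*
 * Illustrative usage sketch (hypothetical names, continuing the
 * my_device example above): the canonical critical-section pattern,
 * with lock and unlock performed by the same task and no recursion:
 *
 *	static void my_device_inc(struct my_device *dev)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);
 *	}
 */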
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;

	debug_mutex_init_waiter(&waiter);

	spin_lock_mutex(&lock->wait_lock);

	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
					signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			spin_unlock_mutex(&lock->wait_lock);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock);
		schedule();
		spin_lock_mutex(&lock->wait_lock);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info __IP__);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	debug_mutex_free_waiter(&waiter);

	DEBUG_WARN_ON(list_empty(&lock->held_list));
	DEBUG_WARN_ON(lock->owner != task->thread_info);

	return 0;
}
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
}
/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	DEBUG_WARN_ON(lock->owner != current_thread_info());

	spin_lock_mutex(&lock->wait_lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative:
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock);
}
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	/* NOTE: no other code must be here - see mutex_lock() */
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
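
/*
 * Illustrative usage sketch (hypothetical names): the return value
 * must be checked, because a signal can abort the wait before the
 * lock is acquired - restarting the syscall is a common response:
 *
 *	static int my_device_inc_user(struct my_device *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */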
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
}
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	int prev;

	spin_lock_mutex(&lock->wait_lock);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1))
		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	return prev == 1;
}
/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the inverse of the down_trylock() return
 * values! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
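
/*
 * Illustrative usage sketch (hypothetical names): note the
 * spin_trylock()-style convention - nonzero means the lock was
 * acquired, the inverse of down_trylock():
 *
 *	static int my_device_inc_if_idle(struct my_device *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return -EBUSY;
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */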