/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void fastcall __mutex_init(struct mutex *lock, const char *name)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);

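/*
 * Illustrative usage sketch (never built, not part of this file): a mutex
 * is either defined statically, or initialized at runtime before first
 * use. The 'example_static_lock', 'example_dev' and 'example_setup' names
 * below are hypothetical and exist only for this sketch.
 */
#if 0
static DEFINE_MUTEX(example_static_lock);	/* statically initialized, unlocked */

struct example_dev {
	struct mutex	io_lock;		/* mutex embedded in a larger object */
};

static void example_setup(struct example_dev *dev)
{
	/* must run before the mutex is ever locked: */
	mutex_init(&dev->io_lock);
}
#endif
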
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void fastcall __sched mutex_lock(struct mutex *lock)
{
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

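/*
 * Illustrative usage sketch (never built, not part of this file): the
 * canonical lock/unlock pairing around a critical section, in process
 * context. The 'example_lock', 'example_count' and 'example_bump' names
 * are hypothetical and exist only for this sketch.
 */
#if 0
static DEFINE_MUTEX(example_lock);
static unsigned long example_count;

static void example_bump(void)
{
	mutex_lock(&example_lock);	/* may sleep - process context only */
	example_count++;		/* data shared with other sleepers */
	mutex_unlock(&example_lock);	/* released by the same task that locked it */
}
#endif
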
static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;

	debug_mutex_init_waiter(&waiter);

	spin_lock_mutex(&lock->wait_lock);

	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
			     signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			spin_unlock_mutex(&lock->wait_lock);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock);
		schedule();
		spin_lock_mutex(&lock->wait_lock);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info __IP__);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	debug_mutex_free_waiter(&waiter);

	DEBUG_WARN_ON(list_empty(&lock->held_list));
	DEBUG_WARN_ON(lock->owner != task->thread_info);

	return 0;
}

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	DEBUG_WARN_ON(lock->owner != current_thread_info());

	spin_lock_mutex(&lock->wait_lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

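/*
 * Illustrative usage sketch (never built, not part of this file): how a
 * caller typically propagates the -EINTR result when a signal interrupts
 * the wait for the lock. The 'example_lock' and 'example_write' names are
 * hypothetical and exist only for this sketch.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static int example_write(void)
{
	if (mutex_lock_interruptible(&example_lock))
		return -EINTR;		/* a signal arrived before the lock was taken */

	/* ... critical section ... */

	mutex_unlock(&example_lock);
	return 0;
}
#endif
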
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
}

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	int prev;

	spin_lock_mutex(&lock->wait_lock);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1))
		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);

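/*
 * Illustrative usage sketch (never built, not part of this file):
 * mutex_trylock() follows the spin_trylock() convention - nonzero means
 * the lock was taken, so the caller unlocks on the success path and
 * backs off on contention. The 'example_lock' and 'example_poll' names
 * are hypothetical and exist only for this sketch.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static int example_poll(void)
{
	if (!mutex_trylock(&example_lock))
		return -EBUSY;		/* contended - report instead of sleeping */

	/* ... critical section ... */

	mutex_unlock(&example_lock);
	return 0;
}
#endif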